author     Areek Zillur <areek.zillur@elasticsearch.com>  2016-10-03 13:59:56 -0400
committer  Areek Zillur <areek.zillur@elasticsearch.com>  2016-10-03 16:12:11 -0400
commit     248ac240ed67d29113affa92a60d33f1b7f38a70 (patch)
tree       677337e784ee66f143fe465f2f1450524050c903
parent     14908f8726e698640565b197c204b879ab7fc2f6 (diff)
parent     80bae2178f14552b378d5e8518f4111c50d89c33 (diff)
Merge branch 'master' into cleanup/transport_bulk
-rw-r--r--  .gitignore  5
-rw-r--r--  CONTRIBUTING.md  3
-rw-r--r--  GRADLE.CHEATSHEET  2
-rw-r--r--  TESTING.asciidoc  5
-rw-r--r--  Vagrantfile  11
-rw-r--r--  benchmarks/build.gradle  3
-rw-r--r--  benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java  7
-rw-r--r--  benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java  30
-rw-r--r--  benchmarks/src/main/resources/log4j.properties  8
-rw-r--r--  benchmarks/src/main/resources/log4j2.properties  8
-rw-r--r--  build.gradle  63
-rw-r--r--  buildSrc/build.gradle  5
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy  48
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy  34
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy  83
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy  69
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy  34
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy  3
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy  52
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy  52
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy  32
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy  21
-rw-r--r--  buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestSpecHack.groovy  8
-rw-r--r--  buildSrc/src/main/resources/checkstyle_suppressions.xml  51
-rw-r--r--  buildSrc/version.properties  8
-rw-r--r--  client/benchmark/README.md  49
-rw-r--r--  client/benchmark/build.gradle  6
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/AbstractBenchmark.java  117
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkMain.java  2
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkRunner.java  33
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Metrics.java  19
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/MetricsCalculator.java  14
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Sample.java  9
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/SampleRecorder.java  11
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java  34
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/search/SearchBenchmarkTask.java  45
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java  17
-rw-r--r--  client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java  15
-rw-r--r--  client/benchmark/src/main/resources/log4j.properties  9
-rw-r--r--  client/benchmark/src/main/resources/log4j2.properties  7
-rw-r--r--  client/client-benchmark-noop-api-plugin/README.md  23
-rw-r--r--  client/client-benchmark-noop-api-plugin/build.gradle (renamed from plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java)  21
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java  49
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java (renamed from core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java)  28
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java  153
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java  118
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java  57
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java  43
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java  496
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java  54
-rw-r--r--  client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java  58
-rw-r--r--  client/rest/src/main/java/org/elasticsearch/client/Response.java  2
-rw-r--r--  client/rest/src/main/java/org/elasticsearch/client/ResponseException.java  2
-rw-r--r--  client/rest/src/main/java/org/elasticsearch/client/RestClient.java  116
-rw-r--r--  client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java  60
-rw-r--r--  client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java  59
-rw-r--r--  client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java  98
-rw-r--r--  client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java  2
-rw-r--r--  client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java  81
-rw-r--r--  client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java  2
-rw-r--r--  client/test/build.gradle  6
-rw-r--r--  client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java  76
-rw-r--r--  client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java  7
-rw-r--r--  core/build.gradle  121
-rw-r--r--  core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java  665
-rw-r--r--  core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java  392
-rw-r--r--  core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java  2
-rw-r--r--  core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java  3
-rw-r--r--  core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/ElasticsearchException.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/ExceptionsHelper.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/Version.java  27
-rw-r--r--  core/src/main/java/org/elasticsearch/action/ActionModule.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java  33
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java  55
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java  34
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java  99
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java  52
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java  94
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java  24
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java  83
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java  496
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java  39
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java  37
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java  51
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java  40
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java  370
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java  88
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java  24
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java  64
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/Retry.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java  56
-rw-r--r--  core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java  172
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/GetRequest.java  47
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/GetResponse.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java  70
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/action/index/IndexRequest.java  53
-rw-r--r--  core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/action/index/IndexResponse.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/action/main/MainResponse.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java  52
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java (renamed from core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java)  19
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java (renamed from core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java)  182
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java  29
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/TransportAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java  99
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java  240
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java  180
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java  43
-rw-r--r--  core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java  111
-rw-r--r--  core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java  72
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java  98
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java  143
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/BootstrapException.java (renamed from core/src/main/java/org/apache/log4j/Java9Hack.java)  28
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java  29
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/JarHell.java  33
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/Natives.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/Security.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/StartupException.java (renamed from core/src/main/java/org/elasticsearch/bootstrap/StartupError.java)  46
-rw-r--r--  core/src/main/java/org/elasticsearch/cli/Command.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/client/transport/TransportClient.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java  29
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/ClusterModule.java  116
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/ClusterState.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java  121
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java  34
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java  318
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java  98
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java  75
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java  57
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java  79
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java  176
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java  24
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java  69
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java  71
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java  262
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java  115
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java  36
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java  155
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java  151
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java  72
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedShard.java  69
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java  209
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java  81
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/StaleShard.java  54
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java  51
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java  205
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java  48
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java  21
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java  84
-rw-r--r--  core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java  30
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java  67
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java  53
-rw-r--r--  core/src/main/java/org/elasticsearch/common/logging/ConsoleAppender.java  260
-rw-r--r--  core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/common/logging/ESLogger.java  208
-rw-r--r--  core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java  55
-rw-r--r--  core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java  213
-rw-r--r--  core/src/main/java/org/elasticsearch/common/logging/Loggers.java  173
-rw-r--r--  core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java  70
-rw-r--r--  core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/common/lucene/Lucene.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/common/network/IfConfig.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/network/NetworkModule.java  202
-rw-r--r--  core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java  21
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/Setting.java  89
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java  30
-rw-r--r--  core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java  129
-rw-r--r--  core/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/common/unit/SizeValue.java  43
-rw-r--r--  core/src/main/java/org/elasticsearch/common/unit/TimeValue.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java  128
-rw-r--r--  core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java  34
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java  44
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/XContent.java  27
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java  1170
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java  67
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java  200
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java  34
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java (renamed from core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java)  141
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java  36
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java  163
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java  30
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java  72
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java  76
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java  75
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java  41
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java  20
-rw-r--r--  core/src/main/java/org/elasticsearch/env/NodeEnvironment.java  41
-rw-r--r--  core/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java  88
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/Gateway.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java  31
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/GatewayModule.java  1
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/GatewayService.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/MetaStateService.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java  314
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java  203
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/http/HttpInfo.java  39
-rw-r--r--  core/src/main/java/org/elasticsearch/http/HttpServer.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/http/HttpServerTransport.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/http/HttpStats.java  37
-rw-r--r--  core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/index/IndexService.java  40
-rw-r--r--  core/src/main/java/org/elasticsearch/index/IndexSettings.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/IndexWarmer.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/index/SearchSlowLog.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/index/VersionType.java  50
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/Analysis.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java  181
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java  218
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java  96
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/MinHashTokenFilterFactory.java  57
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzer.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/ScandinavianFoldingFilterFactory.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/ScandinavianNormalizationFilterFactory.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/codec/CodecService.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/Engine.java  139
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java  38
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java  351
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java  55
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/VersionValue.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java  98
-rw-r--r--  core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/fielddata/plain/LatLonPointDVAtomicFieldData.java  91
-rw-r--r--  core/src/main/java/org/elasticsearch/index/get/GetResult.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/get/ShardGetService.java  173
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java  32
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java  185
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java  43
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java  42
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java  77
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java  155
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java (renamed from core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapperLegacy.java)  54
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java  40
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/LegacyTokenCountFieldMapper.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/Mapper.java  18
-rwxr-xr-x  core/src/main/java/org/elasticsearch/index/mapper/MapperService.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java  69
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java  87
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java  41
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java  190
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java  71
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java  53
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java  97
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java  48
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java  41
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java  84
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java  36
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/search/MatchQuery.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/index/search/geo/LegacyInMemoryGeoBoundingBoxQuery.java (renamed from core/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxQuery.java)  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/search/geo/LegacyIndexedGeoBoundingBoxQuery.java (renamed from core/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxQuery.java)  19
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/IndexShard.java  306
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java  20
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/ShardPath.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java  74
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/store/IndexStore.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/store/Store.java  47
-rw-r--r--  core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java  67
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java  35
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/Translog.java  105
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java  17
-rw-r--r--  core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/IndicesModule.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/IndicesService.java  43
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/breaker/AllCircuitBreakerStats.java  44
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerStats.java  59
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java  127
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java (renamed from core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java)  9
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java (renamed from core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java)  50
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java  33
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java  104
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/IngestDocument.java  29
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/IngestService.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/Pipeline.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/PipelineStore.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/Processor.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java  44
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/fs/FsService.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java  349
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java  503
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java  72
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java  31
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/os/OsService.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/os/OsStats.java  247
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java  49
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java  111
-rw-r--r--  core/src/main/java/org/elasticsearch/node/Node.java  154
-rw-r--r--  core/src/main/java/org/elasticsearch/node/NodeValidationException.java  44
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java  61
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java  69
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/Plugin.java  55
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/PluginCli.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/PluginInfo.java  54
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/PluginsService.java  69
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/SearchPlugin.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java  61
-rw-r--r--  core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/RestController.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java  119
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java  97
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/script/Script.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/script/ScriptMetaData.java  21
-rw-r--r--  core/src/main/java/org/elasticsearch/script/ScriptService.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/script/ScriptSettings.java  45
-rw-r--r--  core/src/main/java/org/elasticsearch/script/ScriptStats.java  35
-rw-r--r--  core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java (renamed from core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java)  118
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchExtBuilder.java  51
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchExtParser.java  43
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchExtRegistry.java (renamed from core/src/main/java/org/elasticsearch/search/SearchParseElement.java)  11
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchModule.java  44
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchPhase.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchService.java  150
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java  21
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java  27
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java  14
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java13
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java15
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java8
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java5
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java9
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java5
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java9
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java5
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java35
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java13
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java13
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java10
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java7
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java7
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java15
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java2
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java2
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java8
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java10
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java117
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java25
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java2
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java26
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/support/XContentParseContext.java65
-rw-r--r--core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java238
-rw-r--r--core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java3
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java43
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java15
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java31
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseContext.java49
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseParseElement.java48
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java195
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java31
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java26
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java53
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java11
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java2
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java3
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/VersionFetchSubPhase.java3
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java17
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java2
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java62
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java81
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java99
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/SearchContext.java71
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java7
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java48
-rw-r--r--core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/query/QueryPhase.java5
-rw-r--r--core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java21
-rw-r--r--core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java1
-rw-r--r--core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java2
-rw-r--r--core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java22
-rw-r--r--core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java1
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java2
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java6
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java3
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java15
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/completion2x/context/GeolocationContextMapping.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java2
-rw-r--r--core/src/main/java/org/elasticsearch/snapshots/RestoreService.java31
-rw-r--r--core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java6
-rw-r--r--core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java8
-rw-r--r--core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java20
-rw-r--r--core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java8
-rw-r--r--core/src/main/java/org/elasticsearch/tasks/TaskInfo.java2
-rw-r--r--core/src/main/java/org/elasticsearch/tasks/TaskManager.java12
-rw-r--r--core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java6
-rw-r--r--core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java88
-rw-r--r--core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java44
-rw-r--r--core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java102
-rw-r--r--core/src/main/java/org/elasticsearch/transport/TcpTransport.java54
-rw-r--r--core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java14
-rw-r--r--core/src/main/java/org/elasticsearch/transport/TransportInfo.java71
-rw-r--r--core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java60
-rw-r--r--core/src/main/java/org/elasticsearch/transport/TransportService.java133
-rw-r--r--core/src/main/java/org/elasticsearch/transport/TransportStats.java59
-rw-r--r--core/src/main/java/org/elasticsearch/transport/Transports.java7
-rw-r--r--core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java19
-rw-r--r--core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java6
-rw-r--r--core/src/main/java/org/elasticsearch/tribe/TribeService.java15
-rw-r--r--core/src/main/java/org/elasticsearch/watcher/FileWatcher.java4
-rw-r--r--core/src/main/resources/org/elasticsearch/bootstrap/security.policy4
-rw-r--r--core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy4
-rw-r--r--core/src/test/java/org/apache/log4j/Java9HackTests.java28
-rw-r--r--core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java96
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java23
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java364
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java95
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java24
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java19
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java151
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java350
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java19
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java57
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java20
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java43
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java52
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java7
-rw-r--r--core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java42
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java117
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java18
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/main/MainActionTests.java28
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java (renamed from core/src/test/java/org/elasticsearch/search/controller/SearchPhaseControllerTests.java)3
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java19
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java85
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java12
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java56
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java315
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTestHelper.java55
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java116
-rw-r--r--core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java259
-rw-r--r--core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java213
-rw-r--r--core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java114
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java46
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java11
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java143
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java3
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterInfoTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java99
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java9
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java5
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java18
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java8
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java32
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java104
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java138
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java26
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java57
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java66
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java45
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java3
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RecoverySourceTests.java53
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableGenerator.java14
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java153
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java64
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java52
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java102
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java307
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java145
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java21
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java269
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java115
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java14
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java39
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java635
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java91
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java61
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java50
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java32
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java392
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java123
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java84
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java293
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java523
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java104
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java230
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java31
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java40
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java32
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java52
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java96
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java162
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java20
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java63
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java321
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java160
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java27
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java136
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java339
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java116
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java161
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java164
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java61
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java68
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java12
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java92
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java34
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java8
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/codecs/CodecTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java57
-rw-r--r--core/src/test/java/org/elasticsearch/common/logging/ESLoggerTests.java169
-rw-r--r--core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java196
-rw-r--r--core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java196
-rw-r--r--core/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java88
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java67
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/SettingTests.java56
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java28
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java28
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java5
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java161
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java752
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java42
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java20
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTestCase.java315
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java32
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java5
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java119
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java87
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java23
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java46
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java121
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java15
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java (renamed from core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java)50
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java49
-rw-r--r--core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java5
-rw-r--r--core/src/test/java/org/elasticsearch/document/ShardInfoIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java43
-rw-r--r--core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java10
-rw-r--r--core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java54
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java13
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java105
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java14
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java20
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java6
-rw-r--r--core/src/test/java/org/elasticsearch/get/GetActionIT.java132
-rw-r--r--core/src/test/java/org/elasticsearch/index/IndexServiceTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java86
-rw-r--r--core/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java20
-rw-r--r--core/src/test/java/org/elasticsearch/index/VersionTypeTests.java40
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java (renamed from core/src/test/java/org/elasticsearch/index/analysis/AnalysisServiceTests.java)89
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java15
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java17
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java14
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java19
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java5
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java19
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/MinHashFilterFactoryTests.java71
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java22
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java15
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java15
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java61
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java4
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/index/codec/CodecTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java588
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java58
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java20
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java (renamed from core/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java)28
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java4
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java25
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapper2xTests.java19
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java22
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java37
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java67
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java7
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java47
-rwxr-xr-xcore/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java28
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java17
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/GeoEncodingTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java457
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java12
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java40
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/GeohashMappingGeoPointTests.java171
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java68
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java14
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java104
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java458
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java46
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java75
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java21
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java161
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java39
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/TTLFieldMapperTests.java50
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java47
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java58
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java49
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java12
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java51
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java63
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java25
-rw-r--r--core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java441
-rw-r--r--core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java93
-rw-r--r--core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java476
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java1468
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java20
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java55
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/StoreTests.java54
-rw-r--r--core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java170
-rw-r--r--core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java35
-rw-r--r--core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java6
-rw-r--r--core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java14
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java7
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java14
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java69
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java2
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java33
-rw-r--r--core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java38
-rw-r--r--core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java21
-rw-r--r--core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java65
-rw-r--r--core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java5
-rw-r--r--core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java15
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java79
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java17
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java110
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java10
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java22
-rw-r--r--core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java83
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java61
-rw-r--r--core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java40
-rw-r--r--core/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java37
-rw-r--r--core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java22
-rw-r--r--core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java32
-rw-r--r--core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java1
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java42
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java55
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java15
-rw-r--r--core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java129
-rw-r--r--core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java8
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java19
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java17
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java38
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/RelocationIT.java30
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java6
-rw-r--r--core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java31
-rw-r--r--core/src/test/java/org/elasticsearch/rest/DeprecationRestHandlerTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java127
-rw-r--r--core/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java26
-rw-r--r--core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java32
-rw-r--r--core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java36
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java22
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java29
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java35
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptTests.java91
-rw-r--r--core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java512
-rw-r--r--core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java (renamed from core/src/test/java/org/elasticsearch/search/internal/DefaultSearchContextTests.java)3
-rw-r--r--core/src/test/java/org/elasticsearch/search/SearchRequestTests.java72
-rw-r--r--core/src/test/java/org/elasticsearch/search/SearchServiceTests.java83
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java81
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java141
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java65
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java9
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java6
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java41
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java27
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java53
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java447
-rw-r--r--core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java145
-rw-r--r--core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java149
-rw-r--r--core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java66
-rw-r--r--core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighterTests.java7
-rw-r--r--core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java31
-rw-r--r--core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java17
-rw-r--r--core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java60
-rw-r--r--core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java10
-rw-r--r--core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java126
-rw-r--r--core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java30
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java9
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java7
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java14
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java72
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java40
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java7
-rw-r--r--core/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java122
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java6
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java28
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearch2xIT.java22
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java4
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java2
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java24
-rw-r--r--core/src/test/java/org/elasticsearch/test/MockLogAppender.java34
-rw-r--r--core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java8
-rw-r--r--core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java20
-rw-r--r--core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java23
-rw-r--r--core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java51
-rw-r--r--core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java3
-rw-r--r--core/src/test/java/org/elasticsearch/tribe/TribeIT.java696
-rw-r--r--core/src/test/java/org/elasticsearch/ttl/SimpleTTLIT.java30
-rw-r--r--core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java8
-rw-r--r--core/src/test/java/org/elasticsearch/update/UpdateIT.java32
-rw-r--r--core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java37
-rw-r--r--core/src/test/resources/indices/bwc/index-2.0.0-beta1.zipbin110178 -> 106104 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.0.0-beta2.zipbin79784 -> 99305 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.0.0-rc1.zipbin105565 -> 104859 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.0.0.zipbin91279 -> 108919 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.0.1.zipbin96161 -> 103862 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.0.2.zipbin87794 -> 88831 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.1.0.zipbin91977 -> 115185 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.1.1.zipbin80455 -> 101910 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.1.2.zipbin79960 -> 92490 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.2.0.zipbin72250 -> 181364 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.2.1.zipbin90922 -> 106340 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.2.2.zipbin82018 -> 88371 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.3.0.zipbin95755 -> 94861 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.3.1.zipbin103526 -> 111198 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.3.2.zipbin91312 -> 81903 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.3.3.zipbin77612 -> 79739 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.3.4.zipbin79323 -> 105772 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.3.5.zipbin106326 -> 83183 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-2.4.0.zipbin0 -> 253483 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zipbin108005 -> 95154 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zipbin77676 -> 93982 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zipbin103590 -> 96233 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.0.0.zipbin89310 -> 104049 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.0.1.zipbin94121 -> 99125 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.0.2.zipbin85663 -> 80050 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.1.0.zipbin89900 -> 110383 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.1.1.zipbin78506 -> 97232 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.1.2.zipbin78021 -> 87833 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.2.0.zipbin70304 -> 98427 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.2.1.zipbin88759 -> 101543 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.2.2.zipbin79920 -> 77185 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.3.0.zipbin93771 -> 90018 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.3.1.zipbin101479 -> 101309 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.3.2.zipbin89331 -> 77228 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.3.3.zipbin75600 -> 75123 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.3.4.zipbin77302 -> 96576 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.3.5.zipbin104147 -> 78548 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-2.4.0.zipbin0 -> 120791 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml13
-rw-r--r--core/src/test/resources/org/elasticsearch/common/logging/config/test2/logging.yml10
-rw-r--r--core/src/test/resources/org/elasticsearch/common/logging/config/test2/test3/logging.yml10
-rw-r--r--dev-tools/create_bwc_index.py56
-rw-r--r--dev-tools/smoke_test_rc.py34
-rw-r--r--distribution/build.gradle33
-rw-r--r--distribution/integ-test-zip/build.gradle25
-rw-r--r--distribution/licenses/apache-log4j-extras-1.2.17.jar.sha11
-rw-r--r--distribution/licenses/apache-log4j-extras-NOTICE5
-rw-r--r--distribution/licenses/log4j-1.2-api-2.6.2.jar.sha11
-rw-r--r--distribution/licenses/log4j-1.2.17.jar.sha11
-rw-r--r--distribution/licenses/log4j-LICENSE.txt (renamed from distribution/licenses/apache-log4j-extras-LICENSE)0
-rw-r--r--distribution/licenses/log4j-NOTICE.txt (renamed from distribution/licenses/log4j-NOTICE)0
-rw-r--r--distribution/licenses/log4j-api-2.6.2.jar.sha11
-rw-r--r--distribution/licenses/log4j-api-LICENSE.txt (renamed from distribution/licenses/log4j-LICENSE)0
-rw-r--r--distribution/licenses/log4j-api-NOTICE.txt5
-rw-r--r--distribution/licenses/log4j-core-2.6.2.jar.sha11
-rw-r--r--distribution/licenses/log4j-core-LICENSE.txt (renamed from plugins/mapper-attachments/licenses/commons-codec-LICENSE.txt)4
-rw-r--r--distribution/licenses/log4j-core-NOTICE.txt5
-rw-r--r--distribution/licenses/lucene-analyzers-common-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-backward-codecs-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-core-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-core-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-grouping-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-grouping-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-highlighter-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-highlighter-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-join-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-join-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-memory-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-memory-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-misc-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-misc-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-queries-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-queries-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-queryparser-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-queryparser-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-sandbox-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-sandbox-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-spatial-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-spatial-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-spatial-extras-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-spatial3d-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-spatial3d-6.2.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-suggest-6.1.0.jar.sha11
-rw-r--r--distribution/licenses/lucene-suggest-6.2.0.jar.sha11
-rw-r--r--distribution/rpm/build.gradle11
-rw-r--r--distribution/src/main/packaging/scripts/prerm11
-rw-r--r--distribution/src/main/packaging/systemd/elasticsearch.service7
-rwxr-xr-xdistribution/src/main/resources/bin/elasticsearch29
-rwxr-xr-xdistribution/src/main/resources/bin/elasticsearch-plugin5
-rw-r--r--distribution/src/main/resources/bin/elasticsearch-plugin.bat41
-rw-r--r--distribution/src/main/resources/bin/elasticsearch-service.bat (renamed from distribution/src/main/resources/bin/service.bat)27
-rw-r--r--distribution/src/main/resources/bin/elasticsearch.bat25
-rw-r--r--distribution/src/main/resources/config/elasticsearch.yml26
-rw-r--r--distribution/src/main/resources/config/jvm.options16
-rw-r--r--distribution/src/main/resources/config/log4j2.properties74
-rw-r--r--distribution/src/main/resources/config/logging.yml86
-rw-r--r--distribution/tar/build.gradle4
-rw-r--r--distribution/zip/build.gradle26
-rw-r--r--docs/README.asciidoc3
-rw-r--r--docs/build.gradle187
-rw-r--r--docs/groovy-api/anatomy.asciidoc10
-rw-r--r--docs/groovy-api/client.asciidoc4
-rw-r--r--docs/groovy-api/delete.asciidoc2
-rw-r--r--docs/groovy-api/get.asciidoc2
-rw-r--r--docs/groovy-api/index_.asciidoc2
-rw-r--r--docs/groovy-api/search.asciidoc16
-rw-r--r--docs/java-api/index.asciidoc2
-rw-r--r--docs/java-api/query-dsl/geo-queries.asciidoc8
-rw-r--r--docs/java-api/query-dsl/geohash-cell-query.asciidoc17
-rw-r--r--docs/java-api/query-dsl/script-query.asciidoc4
-rw-r--r--docs/java-rest/index.asciidoc2
-rw-r--r--docs/java-rest/usage.asciidoc87
-rw-r--r--docs/plugins/analysis-icu.asciidoc5
-rw-r--r--docs/plugins/analysis-kuromoji.asciidoc8
-rw-r--r--docs/plugins/analysis-phonetic.asciidoc5
-rw-r--r--docs/plugins/analysis-smartcn.asciidoc5
-rw-r--r--docs/plugins/analysis-stempel.asciidoc5
-rw-r--r--docs/plugins/discovery-azure-classic.asciidoc43
-rw-r--r--docs/plugins/discovery-ec2.asciidoc8
-rw-r--r--docs/plugins/discovery-file.asciidoc93
-rw-r--r--docs/plugins/discovery-gce.asciidoc12
-rw-r--r--docs/plugins/discovery.asciidoc6
-rw-r--r--docs/plugins/index.asciidoc33
-rw-r--r--docs/plugins/ingest-attachment.asciidoc5
-rw-r--r--docs/plugins/ingest-geoip.asciidoc53
-rw-r--r--docs/plugins/ingest-user-agent.asciidoc5
-rw-r--r--docs/plugins/lang-javascript.asciidoc9
-rw-r--r--docs/plugins/lang-python.asciidoc7
-rw-r--r--docs/plugins/mapper-attachments.asciidoc418
-rw-r--r--docs/plugins/mapper-murmur3.asciidoc5
-rw-r--r--docs/plugins/mapper-size.asciidoc5
-rw-r--r--docs/plugins/mapper.asciidoc7
-rw-r--r--docs/plugins/plugin-script.asciidoc2
-rw-r--r--docs/plugins/repository-azure.asciidoc5
-rw-r--r--docs/plugins/repository-gcs.asciidoc6
-rw-r--r--docs/plugins/repository-hdfs.asciidoc5
-rw-r--r--docs/plugins/repository-s3.asciidoc10
-rw-r--r--docs/plugins/store-smb.asciidoc5
-rw-r--r--docs/python/index.asciidoc2
-rw-r--r--docs/reference/aggregations.asciidoc1
-rw-r--r--docs/reference/aggregations/bucket/children-aggregation.asciidoc283
-rw-r--r--docs/reference/aggregations/bucket/histogram-aggregation.asciidoc12
-rw-r--r--docs/reference/aggregations/bucket/terms-aggregation.asciidoc42
-rw-r--r--docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc4
-rw-r--r--docs/reference/aggregations/metrics/tophits-aggregation.asciidoc1
-rw-r--r--docs/reference/aggregations/pipeline.asciidoc14
-rw-r--r--docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc2
-rw-r--r--docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc4
-rw-r--r--docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc2
-rw-r--r--docs/reference/analysis/analyzers/pattern-analyzer.asciidoc17
-rw-r--r--docs/reference/analysis/analyzers/standard-analyzer.asciidoc2
-rw-r--r--docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc14
-rw-r--r--docs/reference/analysis/tokenfilters.asciidoc4
-rw-r--r--docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc22
-rw-r--r--docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc14
-rw-r--r--docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc14
-rw-r--r--docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc2
-rw-r--r--docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc14
-rw-r--r--docs/reference/api-conventions.asciidoc244
-rw-r--r--docs/reference/cat.asciidoc12
-rw-r--r--docs/reference/cat/allocation.asciidoc6
-rw-r--r--docs/reference/cat/fielddata.asciidoc36
-rw-r--r--docs/reference/cat/indices.asciidoc4
-rw-r--r--docs/reference/cat/master.asciidoc8
-rw-r--r--docs/reference/cat/nodeattrs.asciidoc20
-rw-r--r--docs/reference/cat/nodes.asciidoc69
-rw-r--r--docs/reference/cat/plugins.asciidoc4
-rw-r--r--docs/reference/cat/recovery.asciidoc13
-rw-r--r--docs/reference/cat/shards.asciidoc46
-rw-r--r--docs/reference/cat/templates.asciidoc20
-rw-r--r--docs/reference/cluster.asciidoc19
-rw-r--r--docs/reference/cluster/allocation-explain.asciidoc14
-rw-r--r--docs/reference/cluster/health.asciidoc14
-rw-r--r--docs/reference/cluster/stats.asciidoc12
-rw-r--r--docs/reference/cluster/tasks.asciidoc2
-rw-r--r--docs/reference/docs/bulk.asciidoc92
-rw-r--r--docs/reference/docs/delete-by-query.asciidoc77
-rw-r--r--docs/reference/docs/get.asciidoc183
-rw-r--r--docs/reference/docs/index_.asciidoc2
-rw-r--r--docs/reference/docs/multi-get.asciidoc14
-rw-r--r--docs/reference/docs/reindex.asciidoc124
-rw-r--r--docs/reference/docs/update-by-query.asciidoc60
-rw-r--r--docs/reference/docs/update.asciidoc130
-rwxr-xr-xdocs/reference/getting-started.asciidoc253
-rw-r--r--docs/reference/how-to/indexing-speed.asciidoc9
-rw-r--r--docs/reference/how-to/search-speed.asciidoc118
-rw-r--r--docs/reference/images/lambda_calc.pngbin920 -> 1419 bytes
-rw-r--r--docs/reference/images/sigma_calc.pngbin1236 -> 1731 bytes
-rw-r--r--docs/reference/index-modules.asciidoc6
-rw-r--r--docs/reference/index-modules/slowlog.asciidoc24
-rw-r--r--docs/reference/index.asciidoc27
-rw-r--r--docs/reference/indices/aliases.asciidoc21
-rw-r--r--docs/reference/indices/analyze.asciidoc9
-rw-r--r--docs/reference/indices/clearcache.asciidoc11
-rw-r--r--docs/reference/indices/delete-index.asciidoc4
-rw-r--r--docs/reference/indices/forcemerge.asciidoc10
-rw-r--r--docs/reference/indices/get-field-mapping.asciidoc35
-rw-r--r--docs/reference/indices/get-index.asciidoc12
-rw-r--r--docs/reference/indices/get-mapping.asciidoc16
-rw-r--r--docs/reference/indices/get-settings.asciidoc13
-rw-r--r--docs/reference/indices/indices-exists.asciidoc4
-rw-r--r--docs/reference/indices/open-close.asciidoc6
-rw-r--r--docs/reference/indices/recovery.asciidoc18
-rw-r--r--docs/reference/indices/refresh.asciidoc10
-rw-r--r--docs/reference/indices/rollover-index.asciidoc40
-rw-r--r--docs/reference/indices/shrink-index.asciidoc12
-rw-r--r--docs/reference/indices/stats.asciidoc16
-rw-r--r--docs/reference/indices/templates.asciidoc33
-rw-r--r--docs/reference/indices/types-exists.asciidoc4
-rw-r--r--docs/reference/ingest/ingest-node.asciidoc184
-rw-r--r--docs/reference/mapping.asciidoc23
-rw-r--r--docs/reference/mapping/dynamic-mapping.asciidoc27
-rw-r--r--docs/reference/mapping/dynamic/field-mapping.asciidoc14
-rw-r--r--docs/reference/mapping/fields/all-field.asciidoc6
-rw-r--r--docs/reference/mapping/fields/field-names-field.asciidoc15
-rw-r--r--docs/reference/mapping/fields/id-field.asciidoc2
-rw-r--r--docs/reference/mapping/fields/index-field.asciidoc2
-rw-r--r--docs/reference/mapping/fields/parent-field.asciidoc4
-rw-r--r--docs/reference/mapping/fields/routing-field.asciidoc14
-rw-r--r--docs/reference/mapping/fields/type-field.asciidoc2
-rw-r--r--docs/reference/mapping/fields/uid-field.asciidoc2
-rw-r--r--docs/reference/mapping/params.asciidoc12
-rw-r--r--docs/reference/mapping/params/geohash-precision.asciidoc60
-rw-r--r--docs/reference/mapping/params/geohash-prefix.asciidoc64
-rw-r--r--docs/reference/mapping/params/geohash.asciidoc70
-rw-r--r--docs/reference/mapping/params/lat-lon.asciidoc72
-rw-r--r--docs/reference/mapping/types.asciidoc6
-rw-r--r--docs/reference/mapping/types/geo-point.asciidoc23
-rw-r--r--docs/reference/mapping/types/keyword.asciidoc5
-rw-r--r--docs/reference/mapping/types/nested.asciidoc4
-rw-r--r--docs/reference/mapping/types/percolator.asciidoc5
-rw-r--r--docs/reference/mapping/types/string.asciidoc16
-rw-r--r--docs/reference/mapping/types/text.asciidoc8
-rw-r--r--docs/reference/migration/index.asciidoc8
-rw-r--r--docs/reference/migration/migrate_5_0.asciidoc129
-rw-r--r--docs/reference/migration/migrate_5_0/aggregations.asciidoc33
-rw-r--r--docs/reference/migration/migrate_5_0/allocation.asciidoc59
-rw-r--r--docs/reference/migration/migrate_5_0/cat.asciidoc47
-rw-r--r--docs/reference/migration/migrate_5_0/docs.asciidoc57
-rw-r--r--docs/reference/migration/migrate_5_0/fs.asciidoc31
-rw-r--r--docs/reference/migration/migrate_5_0/http.asciidoc9
-rw-r--r--docs/reference/migration/migrate_5_0/index-apis.asciidoc51
-rw-r--r--docs/reference/migration/migrate_5_0/java.asciidoc402
-rw-r--r--docs/reference/migration/migrate_5_0/mapping.asciidoc264
-rw-r--r--docs/reference/migration/migrate_5_0/packaging.asciidoc65
-rw-r--r--docs/reference/migration/migrate_5_0/percolator.asciidoc111
-rw-r--r--docs/reference/migration/migrate_5_0/plugins.asciidoc161
-rw-r--r--docs/reference/migration/migrate_5_0/rest.asciidoc86
-rw-r--r--docs/reference/migration/migrate_5_0/scripting.asciidoc341
-rw-r--r--docs/reference/migration/migrate_5_0/search.asciidoc212
-rw-r--r--docs/reference/migration/migrate_5_0/settings.asciidoc323
-rw-r--r--docs/reference/migration/migrate_5_0/suggest.asciidoc91
-rw-r--r--docs/reference/migration/migrate_6_0.asciidoc41
-rw-r--r--docs/reference/migration/migrate_6_0/cluster.asciidoc27
-rw-r--r--docs/reference/migration/migrate_6_0/docs.asciidoc7
-rw-r--r--docs/reference/migration/migrate_6_0/plugins.asciidoc7
-rw-r--r--docs/reference/migration/migrate_6_0/rest.asciidoc9
-rw-r--r--docs/reference/migration/migrate_6_0/search.asciidoc7
-rw-r--r--docs/reference/modules/cluster/misc.asciidoc20
-rw-r--r--docs/reference/modules/cluster/shards_allocation.asciidoc6
-rw-r--r--docs/reference/modules/indices/request_cache.asciidoc25
-rw-r--r--docs/reference/modules/scripting.asciidoc7
-rw-r--r--docs/reference/modules/scripting/groovy.asciidoc2
-rw-r--r--docs/reference/modules/scripting/painless-syntax.asciidoc26
-rw-r--r--docs/reference/modules/scripting/painless.asciidoc11
-rw-r--r--docs/reference/modules/scripting/using.asciidoc2
-rw-r--r--docs/reference/modules/snapshots.asciidoc6
-rw-r--r--docs/reference/modules/transport.asciidoc4
-rw-r--r--docs/reference/query-dsl/function-score-query.asciidoc4
-rw-r--r--docs/reference/query-dsl/geo-distance-query.asciidoc2
-rw-r--r--docs/reference/query-dsl/geo-queries.asciidoc7
-rw-r--r--docs/reference/query-dsl/geohash-cell-query.asciidoc78
-rw-r--r--docs/reference/query-dsl/has-child-query.asciidoc31
-rw-r--r--docs/reference/query-dsl/has-parent-query.asciidoc31
-rw-r--r--docs/reference/query-dsl/match-phrase-prefix-query.asciidoc2
-rw-r--r--docs/reference/query-dsl/percolate-query.asciidoc18
-rw-r--r--docs/reference/query-dsl/span-field-masking-query.asciidoc43
-rw-r--r--docs/reference/query-dsl/span-queries.asciidoc5
-rw-r--r--docs/reference/query-dsl/wildcard-query.asciidoc2
-rw-r--r--docs/reference/redirects.asciidoc15
-rw-r--r--docs/reference/release-notes.asciidoc16
-rw-r--r--docs/reference/release-notes/5.0.0-alpha1-2x.asciidoc604
-rw-r--r--docs/reference/release-notes/5.0.0-alpha1.asciidoc810
-rw-r--r--docs/reference/release-notes/5.0.0-alpha2.asciidoc268
-rw-r--r--docs/reference/release-notes/5.0.0-alpha3.asciidoc339
-rw-r--r--docs/reference/release-notes/5.0.0-alpha4.asciidoc361
-rw-r--r--docs/reference/release-notes/5.0.0-alpha5.asciidoc463
-rw-r--r--docs/reference/search.asciidoc14
-rw-r--r--docs/reference/search/explain.asciidoc67
-rw-r--r--docs/reference/search/multi-search.asciidoc33
-rw-r--r--docs/reference/search/percolate.asciidoc4
-rw-r--r--docs/reference/search/profile.asciidoc174
-rw-r--r--docs/reference/search/request-body.asciidoc30
-rw-r--r--docs/reference/search/request/rescore.asciidoc4
-rw-r--r--docs/reference/search/request/scroll.asciidoc20
-rw-r--r--docs/reference/search/request/sort.asciidoc18
-rw-r--r--docs/reference/search/request/source-filtering.asciidoc2
-rw-r--r--docs/reference/search/request/stored-fields.asciidoc25
-rw-r--r--docs/reference/search/search-shards.asciidoc82
-rw-r--r--docs/reference/search/search-template.asciidoc38
-rw-r--r--docs/reference/search/search.asciidoc20
-rw-r--r--docs/reference/search/suggesters/completion-suggest.asciidoc44
-rw-r--r--docs/reference/search/suggesters/context-suggest.asciidoc3
-rw-r--r--docs/reference/search/suggesters/phrase-suggest.asciidoc12
-rw-r--r--docs/reference/search/validate.asciidoc1
-rw-r--r--docs/reference/setup/bootstrap-checks.asciidoc44
-rw-r--r--docs/reference/setup/configuration.asciidoc79
-rw-r--r--docs/reference/setup/install/check-running.asciidoc22
-rw-r--r--docs/reference/setup/install/deb.asciidoc12
-rw-r--r--docs/reference/setup/install/rpm.asciidoc10
-rw-r--r--docs/reference/setup/install/sysconfig-file.asciidoc2
-rw-r--r--docs/reference/setup/install/systemd.asciidoc29
-rw-r--r--docs/reference/setup/install/windows.asciidoc16
-rw-r--r--docs/reference/setup/install/zip-targz.asciidoc15
-rw-r--r--docs/reference/setup/stopping.asciidoc2
-rw-r--r--docs/reference/setup/sysconfig/file-descriptors.asciidoc3
-rw-r--r--docs/reference/setup/sysconfig/heap_size.asciidoc8
-rw-r--r--docs/reference/setup/sysconfig/swap.asciidoc15
-rw-r--r--docs/reference/testing/testing-framework.asciidoc4
-rw-r--r--docs/resiliency/index.asciidoc18
-rw-r--r--modules/build.gradle4
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java29
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java2
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java27
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java120
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java2
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java36
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java2
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java82
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java8
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java35
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java20
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java8
-rw-r--r--modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java8
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java64
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java19
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java59
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java100
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java144
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java4
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java17
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java56
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java69
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java76
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java14
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java4
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java15
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java51
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java4
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java4
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java14
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java4
-rw-r--r--modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java4
-rw-r--r--modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml30
-rw-r--r--modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yaml40
-rw-r--r--modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/140_json.yaml40
-rw-r--r--modules/lang-expression/licenses/lucene-expressions-6.1.0.jar.sha11
-rw-r--r--modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha11
-rw-r--r--modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java22
-rw-r--r--modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java14
-rw-r--r--modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java18
-rw-r--r--modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/25_script_upsert.yaml3
-rw-r--r--modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/90_missing.yaml1
-rw-r--r--modules/lang-mustache/build.gradle2
-rw-r--r--modules/lang-mustache/licenses/compiler-0.9.1.jar.sha11
-rw-r--r--modules/lang-mustache/licenses/compiler-0.9.3.jar.sha11
-rw-r--r--modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java4
-rw-r--r--modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java2
-rw-r--r--modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java22
-rw-r--r--modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java16
-rw-r--r--modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java7
-rw-r--r--modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yaml16
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java30
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java13
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java27
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java5
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java4
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java2
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java2
-rw-r--r--modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java2
-rw-r--r--modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java196
-rw-r--r--modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java11
-rw-r--r--modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java10
-rw-r--r--modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java17
-rw-r--r--modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java15
-rw-r--r--modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml33
-rw-r--r--modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java5
-rw-r--r--modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java4
-rw-r--r--modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java40
-rw-r--r--modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java1
-rw-r--r--modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java5
-rw-r--r--modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java23
-rw-r--r--modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java59
-rw-r--r--modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java138
-rw-r--r--modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java11
-rw-r--r--modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zipbin9772 -> 11852 bytes
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java6
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java4
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java4
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java16
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java6
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java6
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java4
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java4
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java4
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java8
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java67
-rw-r--r--modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java16
-rw-r--r--modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java6
-rw-r--r--modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java2
-rw-r--r--modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java12
-rw-r--r--modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java8
-rw-r--r--modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java41
-rw-r--r--modules/reindex/src/test/resources/responses/main/with_unknown_fields.json22
-rw-r--r--modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml4
-rw-r--r--modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml4
-rw-r--r--modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml4
-rw-r--r--modules/transport-netty3/build.gradle2
-rw-r--r--modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java60
-rw-r--r--modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java34
-rw-r--r--modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java6
-rw-r--r--modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java6
-rw-r--r--modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java56
-rw-r--r--modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java6
-rw-r--r--modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java3
-rw-r--r--modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java5
-rw-r--r--modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportIT.java19
-rw-r--r--modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java3
-rw-r--r--modules/transport-netty3/src/test/resources/rest-api-spec/test/10_basic.yaml6
-rw-r--r--modules/transport-netty4/build.gradle26
-rw-r--r--modules/transport-netty4/licenses/netty-buffer-4.1.4.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-buffer-4.1.5.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-codec-4.1.4.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-codec-4.1.5.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-codec-http-4.1.4.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-codec-http-4.1.5.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-common-4.1.4.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-common-4.1.5.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-handler-4.1.4.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-handler-4.1.5.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-resolver-4.1.4.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-resolver-4.1.5.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-transport-4.1.4.Final.jar.sha11
-rw-r--r--modules/transport-netty4/licenses/netty-transport-4.1.5.Final.jar.sha11
-rw-r--r--modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java52
-rw-r--r--modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java32
-rw-r--r--modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java4
-rw-r--r--modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java8
-rw-r--r--modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java39
-rw-r--r--modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy2
-rw-r--r--modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java54
-rw-r--r--modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java5
-rw-r--r--modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java19
-rw-r--r--modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java3
-rw-r--r--modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yaml6
-rw-r--r--plugins/analysis-icu/licenses/lucene-analyzers-icu-6.1.0.jar.sha11
-rw-r--r--plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha11
-rw-r--r--plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java4
-rw-r--r--plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java16
-rw-r--r--plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java14
-rw-r--r--plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java36
-rw-r--r--plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java8
-rw-r--r--plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.1.0.jar.sha11
-rw-r--r--plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha11
-rw-r--r--plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/JapaneseStopTokenFilterFactory.java4
-rw-r--r--plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java2
-rw-r--r--plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java79
-rw-r--r--plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.1.0.jar.sha11
-rw-r--r--plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha11
-rw-r--r--plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/Nysiis.java2
-rw-r--r--plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java4
-rw-r--r--plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.1.0.jar.sha11
-rw-r--r--plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha11
-rw-r--r--plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java4
-rw-r--r--plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.1.0.jar.sha11
-rw-r--r--plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha11
-rw-r--r--plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java6
-rw-r--r--plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java8
-rw-r--r--plugins/build.gradle1
-rw-r--r--plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java6
-rw-r--r--plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java4
-rw-r--r--plugins/discovery-ec2/build.gradle6
-rw-r--r--plugins/discovery-ec2/config/discovery-ec2/log4j2.properties8
-rw-r--r--plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java10
-rw-r--r--plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java4
-rw-r--r--plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java7
-rw-r--r--plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java35
-rw-r--r--plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java4
-rw-r--r--plugins/discovery-file/build.gradle59
-rw-r--r--plugins/discovery-file/config/discovery-file/unicast_hosts.txt20
-rw-r--r--plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java51
-rw-r--r--plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java109
-rw-r--r--plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java (renamed from plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MapperAttachmentsClientYamlTestSuiteIT.java)13
-rw-r--r--plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java140
-rw-r--r--plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yaml13
-rw-r--r--plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java4
-rw-r--r--plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java4
-rw-r--r--plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java5
-rw-r--r--plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java5
-rw-r--r--plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java4
-rw-r--r--plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java5
-rw-r--r--plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java4
-rw-r--r--plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java61
-rw-r--r--plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java69
-rw-r--r--plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yaml73
-rw-r--r--plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExamplePluginConfiguration.java43
-rw-r--r--plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java7
-rw-r--r--plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java12
-rw-r--r--plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java6
-rw-r--r--plugins/mapper-attachments/build.gradle2051
-rw-r--r--plugins/mapper-attachments/licenses/bcmail-jdk15on-1.54.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/bcmail-jdk15on-LICENSE.txt23
-rw-r--r--plugins/mapper-attachments/licenses/bcpkix-jdk15on-1.54.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/bcpkix-jdk15on-LICENSE.txt23
-rw-r--r--plugins/mapper-attachments/licenses/bcpkix-jdk15on-NOTICE.txt0
-rw-r--r--plugins/mapper-attachments/licenses/bcprov-jdk15on-1.54.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/bcprov-jdk15on-LICENSE.txt23
-rw-r--r--plugins/mapper-attachments/licenses/bcprov-jdk15on-NOTICE.txt0
-rw-r--r--plugins/mapper-attachments/licenses/commons-codec-1.10.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/commons-codec-NOTICE.txt5
-rw-r--r--plugins/mapper-attachments/licenses/commons-compress-1.10.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/commons-compress-LICENSE.txt201
-rw-r--r--plugins/mapper-attachments/licenses/commons-compress-NOTICE.txt11
-rw-r--r--plugins/mapper-attachments/licenses/commons-io-2.4.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/commons-io-LICENSE.txt202
-rw-r--r--plugins/mapper-attachments/licenses/commons-io-NOTICE.txt5
-rw-r--r--plugins/mapper-attachments/licenses/commons-logging-1.1.3.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/commons-logging-LICENSE.txt202
-rw-r--r--plugins/mapper-attachments/licenses/commons-logging-NOTICE.txt5
-rw-r--r--plugins/mapper-attachments/licenses/fontbox-2.0.1.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/fontbox-LICENSE.txt344
-rw-r--r--plugins/mapper-attachments/licenses/fontbox-NOTICE.txt22
-rw-r--r--plugins/mapper-attachments/licenses/jempbox-1.8.12.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/jempbox-LICENSE.txt25
-rw-r--r--plugins/mapper-attachments/licenses/jempbox-NOTICE.txt0
-rw-r--r--plugins/mapper-attachments/licenses/juniversalchardet-1.0.3.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/juniversalchardet-LICENSE.txt469
-rw-r--r--plugins/mapper-attachments/licenses/juniversalchardet-NOTICE.txt0
-rw-r--r--plugins/mapper-attachments/licenses/pdfbox-2.0.1.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/pdfbox-LICENSE.txt344
-rw-r--r--plugins/mapper-attachments/licenses/pdfbox-NOTICE.txt22
-rw-r--r--plugins/mapper-attachments/licenses/poi-3.15-beta1.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/poi-LICENSE.txt463
-rw-r--r--plugins/mapper-attachments/licenses/poi-NOTICE.txt23
-rw-r--r--plugins/mapper-attachments/licenses/poi-ooxml-3.15-beta1.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/poi-ooxml-LICENSE.txt463
-rw-r--r--plugins/mapper-attachments/licenses/poi-ooxml-NOTICE.txt23
-rw-r--r--plugins/mapper-attachments/licenses/poi-ooxml-schemas-3.15-beta1.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/poi-ooxml-schemas-LICENSE.txt463
-rw-r--r--plugins/mapper-attachments/licenses/poi-ooxml-schemas-NOTICE.txt23
-rw-r--r--plugins/mapper-attachments/licenses/poi-scratchpad-3.15-beta1.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/poi-scratchpad-LICENSE.txt463
-rw-r--r--plugins/mapper-attachments/licenses/poi-scratchpad-NOTICE.txt23
-rw-r--r--plugins/mapper-attachments/licenses/tagsoup-1.2.1.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/tagsoup-LICENSE.txt201
-rw-r--r--plugins/mapper-attachments/licenses/tagsoup-NOTICE.txt0
-rw-r--r--plugins/mapper-attachments/licenses/tika-core-1.13.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/tika-core-LICENSE.txt372
-rw-r--r--plugins/mapper-attachments/licenses/tika-core-NOTICE.txt17
-rw-r--r--plugins/mapper-attachments/licenses/tika-parsers-1.13.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/tika-parsers-LICENSE.txt372
-rw-r--r--plugins/mapper-attachments/licenses/tika-parsers-NOTICE.txt17
-rw-r--r--plugins/mapper-attachments/licenses/xmlbeans-2.6.0.jar.sha11
-rw-r--r--plugins/mapper-attachments/licenses/xmlbeans-LICENSE.txt201
-rw-r--r--plugins/mapper-attachments/licenses/xmlbeans-NOTICE.txt29
-rw-r--r--plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java656
-rw-r--r--plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java53
-rw-r--r--plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java161
-rw-r--r--plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy32
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/AttachmentUnitTestCase.java55
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/DateAttachmentMapperTests.java52
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/EncryptedDocMapperTests.java135
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/LanguageDetectionAttachmentMapperTests.java128
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MetadataMapperTests.java113
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MultifieldAttachmentMapperTests.java186
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java128
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java65
-rw-r--r--plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/VariousDocTests.java176
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/asciidoc.asciidoc5
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/encrypted.pdfbin14682 -> 0 bytes
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithEmptyDateMeta.html11
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithValidDateMeta.html11
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithoutDateMeta.html10
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/issue-104.docxbin58480 -> 0 bytes
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testContentLength.txt9
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testEPUB.epubbin29719 -> 0 bytes
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testXHTML.html29
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-english.txt1
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-french.txt1
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-nolang.txt0
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/standalone/standalone-mapping.json9
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/tika-files.zipbin6312340 -> 0 bytes
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/date/date-mapping.json12
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/encrypted/test-mapping.json12
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/language/language-mapping.json12
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/metadata/test-mapping.json9
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/multifield/multifield-mapping.json56
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping-all-fields.json19
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping.json9
-rw-r--r--plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/various-doc/test-mapping.json9
-rw-r--r--plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/00_basic.yaml13
-rw-r--r--plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/10_index.yaml148
-rw-r--r--plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/20_search.yaml111
-rw-r--r--plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml58
-rw-r--r--plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml63
-rw-r--r--plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml75
-rw-r--r--plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java5
-rw-r--r--plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java40
-rw-r--r--plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java2
-rw-r--r--plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yaml2
-rw-r--r--plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java4
-rw-r--r--plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java4
-rw-r--r--plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java12
-rw-r--r--plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java25
-rw-r--r--plugins/repository-hdfs/build.gradle5
-rw-r--r--plugins/repository-s3/build.gradle6
-rw-r--r--plugins/repository-s3/config/repository-s3/log4j2.properties8
-rw-r--r--plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java4
-rw-r--r--plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java8
-rw-r--r--plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java4
-rw-r--r--plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java1
-rw-r--r--plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java4
-rw-r--r--plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java4
-rw-r--r--qa/backwards-5.0/build.gradle2
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java4
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/cluster/metadata/EvilSystemPropertyTests.java47
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java154
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java202
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java6
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java54
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java39
-rw-r--r--qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java8
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties34
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/second/log4j2.properties8
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/third/log4j2.properties8
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties27
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/does_not_exist/nothing_to_see_here (renamed from plugins/mapper-attachments/licenses/bcmail-jdk15on-NOTICE.txt)0
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties17
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties20
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties22
-rw-r--r--qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/prefix/log4j2.properties20
-rw-r--r--qa/rolling-upgrade/build.gradle71
-rw-r--r--qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java48
-rw-r--r--qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml37
-rw-r--r--qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml34
-rw-r--r--qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml37
-rw-r--r--qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java7
-rw-r--r--qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java13
-rw-r--r--qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml9
-rw-r--r--qa/vagrant/build.gradle18
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats18
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats13
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats22
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats15
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats24
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.mustache3
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash18
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/os_package.bash8
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash21
-rw-r--r--qa/vagrant/src/test/resources/packaging/scripts/tar.bash2
-rw-r--r--qa/vagrant/versions16
-rw-r--r--rest-api-spec/build.gradle13
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json14
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json6
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json45
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json8
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json8
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/explain.json4
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/get.json4
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/index.json2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json4
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/mget.json4
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json (renamed from rest-api-spec/src/main/resources/rest-api-spec/api/reindex.rethrottle.json)4
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json8
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json4
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json6
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/update.json12
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json10
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_fields.yaml49
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yaml76
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml41
-rwxr-xr-xrest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml2
-rwxr-xr-xrest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml1
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml176
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yaml53
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/20_request_timeout.yaml23
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml6
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml19
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/delete/27_force_version.yaml44
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yaml10
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yaml (renamed from rest-api-spec/src/main/resources/rest-api-spec/test/get/20_fields.yaml)22
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yaml10
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yaml14
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml27
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yaml10
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/index/37_force_version.yaml46
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml23
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml16
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml96
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/10_basic.yaml84
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml34
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml1
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_basic.yaml90
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yaml (renamed from rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_fields.yaml)46
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yaml6
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yaml2
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yaml46
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yaml44
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml24
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml61
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml54
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml54
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/40_versions.yaml27
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yaml6
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yaml6
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yaml6
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yaml (renamed from rest-api-spec/src/main/resources/rest-api-spec/test/update/80_fields.yaml)7
-rw-r--r--rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yaml2
-rw-r--r--settings.gradle4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java15
-rw-r--r--test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java6
-rw-r--r--test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java6
-rw-r--r--test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java (renamed from test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java)110
-rw-r--r--test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java (renamed from core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java)22
-rw-r--r--test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java62
-rw-r--r--test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java2
-rw-r--r--test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java10
-rw-r--r--test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java477
-rw-r--r--test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java10
-rw-r--r--test/framework/src/main/java/org/elasticsearch/ingest/IngestDocumentMatcher.java67
-rw-r--r--test/framework/src/main/java/org/elasticsearch/node/MockNode.java37
-rw-r--r--test/framework/src/main/java/org/elasticsearch/node/NodeTests.java31
-rw-r--r--test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java8
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java280
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java49
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java9
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java112
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java13
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java80
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java5
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java5
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java6
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java21
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java10
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/TestCluster.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java61
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java95
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruptionTest.java151
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java18
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java15
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java6
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java6
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java10
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java132
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java1
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java17
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java60
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java5
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java8
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/FileUtils.java2
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java4
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java6
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java5
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java18
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java21
-rw-r--r--test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java20
-rw-r--r--test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java42
-rw-r--r--test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java6
-rw-r--r--test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransportPlugin.java22
-rw-r--r--test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java1
-rw-r--r--test/framework/src/main/resources/log4j.properties9
-rw-r--r--test/framework/src/main/resources/log4j2-test.properties10
-rw-r--r--test/framework/src/test/java/Dummy.java (renamed from core/src/main/java/org/apache/log4j/package-info.java)6
-rw-r--r--test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java104
-rw-r--r--test/framework/src/test/java/org/elasticsearch/test/rest/yaml/FileUtilsTests.java2
-rw-r--r--test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java2
-rw-r--r--test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java113
-rw-r--r--test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java3
-rw-r--r--test/logger-usage/build.gradle16
-rw-r--r--test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java190
-rw-r--r--test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java118
1980 files changed, 43039 insertions(+), 41057 deletions(-)
diff --git a/.gitignore b/.gitignore
index af7e64fc89..d1810a5a83 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,11 @@ nbactions.xml
.gradle/
build/
+# gradle wrapper
+/gradle/
+gradlew
+gradlew.bat
+
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
target/
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b0f1e054e4..da81436b8a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -120,7 +120,8 @@ Please follow these formatting guidelines:
* The rest is left to Java coding standards
* Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do.
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them.
- * Eclipse: Preferences->Java->Code Style->Organize Imports. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
+ * Eclipse: `Preferences->Java->Code Style->Organize Imports`. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
+ * IntelliJ: `Preferences->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
* Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.
To create a distribution from the source, simply run:
diff --git a/GRADLE.CHEATSHEET b/GRADLE.CHEATSHEET
index 3362b8571e..2c9c34fe1b 100644
--- a/GRADLE.CHEATSHEET
+++ b/GRADLE.CHEATSHEET
@@ -4,4 +4,4 @@ test -> test
verify -> check
verify -Dskip.unit.tests -> integTest
package -DskipTests -> assemble
-install -DskipTests -> install
+install -DskipTests -> publishToMavenLocal
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 5046dc087b..dd6c093047 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -364,10 +364,12 @@ These are the linux flavors the Vagrantfile currently supports:
* ubuntu-1204 aka precise
* ubuntu-1404 aka trusty
* ubuntu-1504 aka vivid
+* ubuntu-1604 aka xenial
* debian-8 aka jessie, the current debian stable distribution
* centos-6
* centos-7
-* fedora-22
+* fedora-24
+* oel-6 aka Oracle Enterprise Linux 6
* oel-7 aka Oracle Enterprise Linux 7
* sles-12
* opensuse-13
@@ -376,7 +378,6 @@ We're missing the following from the support matrix because there aren't high
quality boxes available in vagrant atlas:
* sles-11
-* oel-6
We're missing the following because our tests are very linux/bash centric:
diff --git a/Vagrantfile b/Vagrantfile
index 423b50038e..761ef20628 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -37,6 +37,13 @@ Vagrant.configure(2) do |config|
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
SHELL
end
+ config.vm.define "ubuntu-1604" do |config|
+ config.vm.box = "elastic/ubuntu-16.04-x86_64"
+ ubuntu_common config, extra: <<-SHELL
+ # Install Jayatana so we can work around it being present.
+ [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
+ SHELL
+ end
# Wheezy's backports don't contain Openjdk 8 and the backflips required to
# get the sun jdk on there just aren't worth it. We have jessie for testing
# debian and it works fine.
@@ -78,8 +85,8 @@ Vagrant.configure(2) do |config|
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder ".", "/elasticsearch"
config.vm.provider "virtualbox" do |v|
- # Give the boxes 2GB so they can run our tests if they have to.
- v.memory = 2048
+ # Give the boxes 3GB because Elasticsearch defaults to using 2GB
+ v.memory = 3072
end
if Vagrant.has_plugin?("vagrant-cachier")
config.cache.scope = :box
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 186fdca44e..3b8b92328e 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -57,6 +57,9 @@ dependencies {
}
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+// enable JMH's BenchmarkProcessor to generate the final benchmark classes
+// needs to be added separately otherwise Gradle will quote it and javac will fail
+compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
forbiddenApis {
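The comment above is about Gradle's argument quoting: the flag and its value must reach javac as two separate tokens. A minimal Groovy sketch of the two forms (the broken variant's failure mode is an assumption, kept commented out):

    // Broken (assumed failure mode): one combined string is quoted by Gradle
    // and handed to javac as a single argument, which javac rejects.
    // compileJava.options.compilerArgs << "-processor org.openjdk.jmh.generators.BenchmarkProcessor"
    // Works: flag and value are passed as two separate arguments.
    compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])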
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java
index 86902b380c..39cfdb6582 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.settings.Settings;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -160,11 +159,9 @@ public class AllocationBenchmark {
public ClusterState measureAllocation() {
ClusterState clusterState = initialClusterState;
while (clusterState.getRoutingNodes().hasUnassignedShards()) {
- RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes()
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes()
.shardsWithState(ShardRoutingState.INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingResult(result).build();
- result = strategy.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
}
return clusterState;
}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
index 9b1cfaabf9..860137cf55 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
@@ -22,10 +22,10 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -35,9 +35,9 @@ import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.gateway.GatewayAllocator;
-import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
-import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
@@ -50,12 +50,12 @@ public final class Allocators {
}
@Override
- public void applyStartedShards(StartedRerouteAllocation allocation) {
+ public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
// noop
}
@Override
- public void applyFailedShards(FailedRerouteAllocation allocation) {
+ public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
// noop
}
@@ -72,7 +72,7 @@ public final class Allocators {
public static AllocationService createAllocationService(Settings settings) throws NoSuchMethodException, InstantiationException,
IllegalAccessException, InvocationTargetException {
- return createAllocationService(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings
+ return createAllocationService(settings, new ClusterSettings(Settings.EMPTY, ClusterSettings
.BUILT_IN_CLUSTER_SETTINGS));
}
@@ -85,19 +85,9 @@ public final class Allocators {
public static AllocationDeciders defaultAllocationDeciders(Settings settings, ClusterSettings clusterSettings) throws
IllegalAccessException, InvocationTargetException, InstantiationException, NoSuchMethodException {
- List<AllocationDecider> list = new ArrayList<>();
- // Keep a deterministic order of allocation deciders for the benchmark
- for (Class<? extends AllocationDecider> deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
- try {
- Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, ClusterSettings
- .class);
- list.add(constructor.newInstance(settings, clusterSettings));
- } catch (NoSuchMethodException e) {
- Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class);
- list.add(constructor.newInstance(settings));
- }
- }
- return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
+ Collection<AllocationDecider> deciders =
+ ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList());
+ return new AllocationDeciders(settings, deciders);
}
diff --git a/benchmarks/src/main/resources/log4j.properties b/benchmarks/src/main/resources/log4j.properties
deleted file mode 100644
index 8ca1bc8729..0000000000
--- a/benchmarks/src/main/resources/log4j.properties
+++ /dev/null
@@ -1,8 +0,0 @@
-# Do not log at all if it is not really critical - we're in a benchmark
-benchmarks.es.logger.level=ERROR
-log4j.rootLogger=${benchmarks.es.logger.level}, out
-
-log4j.appender.out=org.apache.log4j.ConsoleAppender
-log4j.appender.out.layout=org.apache.log4j.PatternLayout
-log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
-
diff --git a/benchmarks/src/main/resources/log4j2.properties b/benchmarks/src/main/resources/log4j2.properties
new file mode 100644
index 0000000000..c3ae1fe56d
--- /dev/null
+++ b/benchmarks/src/main/resources/log4j2.properties
@@ -0,0 +1,8 @@
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+# Do not log at all if it is not really critical - we're in a benchmark
+rootLogger.level = error
+rootLogger.appenderRef.console.ref = console
diff --git a/build.gradle b/build.gradle
index f1b57d7857..e1ab457663 100644
--- a/build.gradle
+++ b/build.gradle
@@ -17,7 +17,6 @@
* under the License.
*/
-import com.bmuschko.gradle.nexus.NexusPlugin
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.lib.RepositoryBuilder
import org.gradle.plugins.ide.eclipse.model.SourceFolder
@@ -52,68 +51,6 @@ subprojects {
}
}
}
-
- plugins.withType(NexusPlugin).whenPluginAdded {
- modifyPom {
- project {
- url 'https://github.com/elastic/elasticsearch'
- inceptionYear '2009'
-
- scm {
- url 'https://github.com/elastic/elasticsearch'
- connection 'scm:https://elastic@github.com/elastic/elasticsearch'
- developerConnection 'scm:git://github.com/elastic/elasticsearch.git'
- }
-
- licenses {
- license {
- name 'The Apache Software License, Version 2.0'
- url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
- distribution 'repo'
- }
- }
- }
- }
- extraArchive {
- javadoc = true
- tests = false
- }
- nexus {
- String buildSnapshot = System.getProperty('build.snapshot', 'true')
- if (buildSnapshot == 'false') {
- Repository repo = new RepositoryBuilder().findGitDir(project.rootDir).build()
- String shortHash = repo.resolve('HEAD')?.name?.substring(0,7)
- repositoryUrl = project.hasProperty('build.repository') ? project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/"
- }
- }
- // we have our own username/password prompts so that they only happen once
- // TODO: add gpg signing prompts, which is tricky, as the buildDeb/buildRpm tasks are executed before this code block
- project.gradle.taskGraph.whenReady { taskGraph ->
- if (taskGraph.allTasks.any { it.name == 'uploadArchives' }) {
- Console console = System.console()
- // no need for username/password on local deploy
- if (project.nexus.repositoryUrl.startsWith('file://')) {
- project.rootProject.allprojects.each {
- it.ext.nexusUsername = 'foo'
- it.ext.nexusPassword = 'bar'
- }
- } else {
- if (project.hasProperty('nexusUsername') == false) {
- String nexusUsername = console.readLine('\nNexus username: ')
- project.rootProject.allprojects.each {
- it.ext.nexusUsername = nexusUsername
- }
- }
- if (project.hasProperty('nexusPassword') == false) {
- String nexusPassword = new String(console.readPassword('\nNexus password: '))
- project.rootProject.allprojects.each {
- it.ext.nexusPassword = nexusPassword
- }
- }
- }
- }
- }
- }
}
allprojects {
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 1be5020f4f..0e8c2dc141 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -95,7 +95,6 @@ dependencies {
compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
compile 'de.thetaphi:forbiddenapis:2.2'
- compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
compile 'org.apache.rat:apache-rat:0.11'
compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1'
}
@@ -109,10 +108,6 @@ if (project == rootProject) {
repositories {
mavenCentral()
- maven {
- name 'sonatype-snapshots'
- url "https://oss.sonatype.org/content/repositories/snapshots/"
- }
}
test.exclude 'org/elasticsearch/test/NamingConventionsCheckBadClasses*'
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index d996be40f8..ebe4a2bdcc 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -28,11 +28,10 @@ import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.artifacts.Configuration
import org.gradle.api.artifacts.ModuleDependency
-import org.gradle.api.artifacts.ModuleVersionIdentifier
import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
-import org.gradle.api.artifacts.maven.MavenPom
+import org.gradle.api.plugins.JavaPlugin
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
@@ -63,7 +62,6 @@ class BuildPlugin implements Plugin<Project> {
project.pluginManager.apply('nebula.info-java')
project.pluginManager.apply('nebula.info-scm')
project.pluginManager.apply('nebula.info-jar')
- project.pluginManager.apply('com.bmuschko.nexus')
project.pluginManager.apply(ProvidedBasePlugin)
globalBuildInfo(project)
@@ -71,6 +69,8 @@ class BuildPlugin implements Plugin<Project> {
configureConfigurations(project)
project.ext.versions = VersionProperties.versions
configureCompile(project)
+ configureJavadocJar(project)
+ configureSourcesJar(project)
configurePomGeneration(project)
configureTest(project)
@@ -157,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
private static String findJavaHome() {
String javaHome = System.getenv('JAVA_HOME')
if (javaHome == null) {
- if (System.getProperty("idea.active") != null) {
+ if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) {
// intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with
javaHome = Jvm.current().javaHome
} else {
@@ -267,11 +267,6 @@ class BuildPlugin implements Plugin<Project> {
project.configurations.compile.dependencies.all(disableTransitiveDeps)
project.configurations.testCompile.dependencies.all(disableTransitiveDeps)
project.configurations.provided.dependencies.all(disableTransitiveDeps)
-
- // add exclusions to the pom directly, for each of the transitive deps of this project's deps
- project.modifyPom { MavenPom pom ->
- pom.withXml(fixupDependencies(project))
- }
}
/** Adds repositories used by ES dependencies */
@@ -284,10 +279,6 @@ class BuildPlugin implements Plugin<Project> {
repos.mavenLocal()
}
repos.mavenCentral()
- repos.maven {
- name 'sonatype-snapshots'
- url 'http://oss.sonatype.org/content/repositories/snapshots/'
- }
String luceneVersion = VersionProperties.lucene
if (luceneVersion.contains('-snapshot')) {
// extract the revision number from the version with a regex matcher
@@ -394,14 +385,20 @@ class BuildPlugin implements Plugin<Project> {
* -serial because we don't use java serialization.
*/
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
- options.compilerArgs << '-Werror' << '-proc:none' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
+ options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
+
+ // either disable annotation processors completely (default) or allow them if an annotation processor is explicitly defined
+ if (options.compilerArgs.contains("-processor") == false) {
+ options.compilerArgs << '-proc:none'
+ }
+
options.encoding = 'UTF-8'
//options.incremental = true
if (project.javaVersion == JavaVersion.VERSION_1_9) {
- // hack until gradle supports java 9's new "-release" arg
+ // hack until gradle supports java 9's new "--release" arg
assert minimumJava == JavaVersion.VERSION_1_8
- options.compilerArgs << '-release' << '8'
+ options.compilerArgs << '--release' << '8'
project.sourceCompatibility = null
project.targetCompatibility = null
}
@@ -409,6 +406,25 @@ class BuildPlugin implements Plugin<Project> {
}
}
+ /** Adds a javadocJar task to generate a jar containing javadocs. */
+ static void configureJavadocJar(Project project) {
+ Jar javadocJarTask = project.task('javadocJar', type: Jar)
+ javadocJarTask.classifier = 'javadoc'
+ javadocJarTask.group = 'build'
+ javadocJarTask.description = 'Assembles a jar containing javadocs.'
+ javadocJarTask.from(project.tasks.getByName(JavaPlugin.JAVADOC_TASK_NAME))
+ project.assemble.dependsOn(javadocJarTask)
+ }
+
+ static void configureSourcesJar(Project project) {
+ Jar sourcesJarTask = project.task('sourcesJar', type: Jar)
+ sourcesJarTask.classifier = 'sources'
+ sourcesJarTask.group = 'build'
+ sourcesJarTask.description = 'Assembles a jar containing source files.'
+ sourcesJarTask.from(project.sourceSets.main.allSource)
+ project.assemble.dependsOn(sourcesJarTask)
+ }
+
/** Adds additional manifest info to jars, and adds source and javadoc jars */
static void configureJars(Project project) {
project.tasks.withType(Jar) { Jar jarTask ->
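With `-proc:none` now added only when no `-processor` argument is present, an individual project can opt back into annotation processing, as benchmarks/build.gradle does above. A minimal sketch of a module build script (the processor class is hypothetical):

    // hypothetical module build.gradle: declaring a processor suppresses the
    // default '-proc:none' that BuildPlugin would otherwise add
    compileJava.options.compilerArgs.addAll(["-processor", "com.example.MyProcessor"])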
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
index 0c2e37ab82..11bdbd1952 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.gradle.doc
+import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.test.RestTestPlugin
import org.gradle.api.Project
import org.gradle.api.Task
@@ -30,9 +31,19 @@ public class DocsTestPlugin extends RestTestPlugin {
@Override
public void apply(Project project) {
super.apply(project)
+ Map<String, String> defaultSubstitutions = [
+ /* These match up with the asciidoc syntax for substitutions but
+ * the values may differ. In particular {version} needs to resolve
+ * to the version being built when testing but to the last
+ * released version for docs. */
+ '\\{version\\}':
+ VersionProperties.elasticsearch.replace('-SNAPSHOT', ''),
+ '\\{lucene_version\\}' : VersionProperties.lucene,
+ ]
Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
listSnippets.group 'Docs'
listSnippets.description 'List each snippet'
+ listSnippets.defaultSubstitutions = defaultSubstitutions
listSnippets.perSnippet { println(it.toString()) }
Task listConsoleCandidates = project.tasks.create(
@@ -40,26 +51,15 @@ public class DocsTestPlugin extends RestTestPlugin {
listConsoleCandidates.group 'Docs'
listConsoleCandidates.description
'List snippets that probably should be marked // CONSOLE'
+ listConsoleCandidates.defaultSubstitutions = defaultSubstitutions
listConsoleCandidates.perSnippet {
- if (
- it.console != null // Already marked, nothing to do
- || it.testResponse // It is a response
- ) {
- return
+ if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) {
+ println(it.toString())
}
- List<String> languages = [
- // This language should almost always be marked console
- 'js',
- // These are often curl commands that should be converted but
- // are probably false positives
- 'sh', 'shell',
- ]
- if (false == languages.contains(it.language)) {
- return
- }
- println(it.toString())
}
- project.tasks.create('buildRestTests', RestTestsFromSnippetsTask)
+ Task buildRestTests = project.tasks.create(
+ 'buildRestTests', RestTestsFromSnippetsTask)
+ buildRestTests.defaultSubstitutions = defaultSubstitutions
}
}
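The default substitutions are ordinary replaceAll patterns applied to every snippet's contents. A self-contained Groovy sketch of the effect (version values assumed):

    def defaults = ['\\{version\\}': '5.0.0', '\\{lucene_version\\}': '6.2.0']
    def contents = 'Elasticsearch {version} is built on Lucene {lucene_version}'
    defaults.each { pattern, subst -> contents = contents.replaceAll(pattern, subst) }
    assert contents == 'Elasticsearch 5.0.0 is built on Lucene 6.2.0'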
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
index 61a07f4fbd..9b83545f65 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
@@ -42,6 +42,16 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
Map<String, String> setups = new HashMap()
/**
+ * A list of files that contain snippets that *probably* should be
+ * converted to `// CONSOLE` but have yet to be converted. If a file is in
+ * this list and doesn't contain unconverted snippets this task will fail.
+ * If there are unconverted snippets not in this list then this task will
+ * fail. All files are paths relative to the docs dir.
+ */
+ @Input
+ List<String> expectedUnconvertedCandidates = []
+
+ /**
* Root directory of the tests being generated. To make rest tests happy
* we generate them in a testRoot() which is contained in this directory.
*/
@@ -56,6 +66,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
TestBuilder builder = new TestBuilder()
doFirst { outputRoot().delete() }
perSnippet builder.&handleSnippet
+ doLast builder.&checkUnconverted
doLast builder.&finishLastTest
}
@@ -67,6 +78,27 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
return new File(testRoot, '/rest-api-spec/test')
}
+ /**
+ * Is this snippet a candidate for conversion to `// CONSOLE`?
+ */
+ static isConsoleCandidate(Snippet snippet) {
+ /* Snippets that are responses or already marked as `// CONSOLE` or
+ * `// NOTCONSOLE` are not candidates. */
+ if (snippet.console != null || snippet.testResponse) {
+ return false
+ }
+ /* js snippets almost always should be marked with `// CONSOLE`. js
+ * snippets that shouldn't be marked `// CONSOLE`, like examples for
+ * js client, should always be marked with `// NOTCONSOLE`.
+ *
+ * `sh` snippets that contain `curl` almost always should be marked
+ * with `// CONSOLE`. In the exceptionally rare cases where they are
+ * not communicating with Elasticsearch, like the examples in the ec2
+ * and gce discovery plugins, the snippets should be marked
+ * `// NOTCONSOLE`. */
+ return snippet.language == 'js' || snippet.curl
+ }
+
private class TestBuilder {
private static final String SYNTAX = {
String method = /(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)/
@@ -89,10 +121,21 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
PrintWriter current
/**
+ * Files containing all snippets that *probably* should be converted
+ * to `// CONSOLE` but have yet to be converted. All files are paths
+ * relative to the docs dir.
+ */
+ Set<String> unconvertedCandidates = new HashSet<>()
+
+ /**
* Called each time a snippet is encountered. Tracks the snippets and
* calls buildTest to actually build the test.
*/
void handleSnippet(Snippet snippet) {
+ if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) {
+ unconvertedCandidates.add(snippet.path.toString()
+ .replace('\\', '/'))
+ }
if (BAD_LANGUAGES.contains(snippet.language)) {
throw new InvalidUserDataException(
"$snippet: Use `js` instead of `${snippet.language}`.")
@@ -117,7 +160,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
if (false == test.continued) {
current.println('---')
- current.println("\"$test.start\":")
+ current.println("\"line_$test.start\":")
}
if (test.skipTest) {
current.println(" - skip:")
@@ -146,6 +189,14 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
void emitDo(String method, String pathAndQuery, String body,
String catchPart, List warnings, boolean inSetup) {
def (String path, String query) = pathAndQuery.tokenize('?')
+ if (path == null) {
+ path = '' // Catch requests to the root...
+ } else {
+ // Escape some characters that are also escaped by sense
+ path = path.replace('<', '%3C').replace('>', '%3E')
+ path = path.replace('{', '%7B').replace('}', '%7D')
+ path = path.replace('|', '%7C')
+ }
current.println(" - do:")
if (catchPart != null) {
current.println(" catch: $catchPart")
@@ -247,5 +298,35 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current = null
}
}
+
+ void checkUnconverted() {
+ List<String> listedButNotFound = []
+ for (String listed : expectedUnconvertedCandidates) {
+ if (false == unconvertedCandidates.remove(listed)) {
+ listedButNotFound.add(listed)
+ }
+ }
+ String message = ""
+ if (false == listedButNotFound.isEmpty()) {
+ Collections.sort(listedButNotFound)
+ listedButNotFound = listedButNotFound.collect {' ' + it}
+ message += "Expected unconverted snippets but none found in:\n"
+ message += listedButNotFound.join("\n")
+ }
+ if (false == unconvertedCandidates.isEmpty()) {
+ List<String> foundButNotListed =
+ new ArrayList<>(unconvertedCandidates)
+ Collections.sort(foundButNotListed)
+ foundButNotListed = foundButNotListed.collect {' ' + it}
+ if (false == "".equals(message)) {
+ message += "\n"
+ }
+ message += "Unexpected unconverted snippets:\n"
+ message += foundButNotListed.join("\n")
+ }
+ if (false == "".equals(message)) {
+ throw new InvalidUserDataException(message);
+ }
+ }
}
}
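checkUnconverted turns the candidate list into a strict whitelist: entries that no longer contain unconverted snippets fail the build, and so do new unconverted snippets that aren't listed. A sketch of how a docs build script might populate it (paths hypothetical):

    // hypothetical docs/build.gradle entries; paths are relative to the docs dir
    buildRestTests.expectedUnconvertedCandidates = [
            'reference/modules/discovery/ec2.asciidoc',
            'reference/setup/install.asciidoc',
    ]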
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
index 8c3524a9b9..518b4da439 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
@@ -22,6 +22,7 @@ package org.elasticsearch.gradle.doc
import org.gradle.api.DefaultTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.ConfigurableFileTree
+import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.TaskAction
@@ -38,6 +39,7 @@ public class SnippetsTask extends DefaultTask {
private static final String SKIP = /skip:([^\]]+)/
private static final String SETUP = /setup:([^ \]]+)/
private static final String WARNING = /warning:(.+)/
+ private static final String CAT = /(_cat)/
private static final String TEST_SYNTAX =
/(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/
@@ -60,6 +62,12 @@ public class SnippetsTask extends DefaultTask {
exclude 'build'
}
+ /**
+ * Substitutions done on every snippet's contents.
+ */
+ @Input
+ Map<String, String> defaultSubstitutions = [:]
+
@TaskAction
public void executeTask() {
/*
@@ -75,21 +83,39 @@ public class SnippetsTask extends DefaultTask {
Closure emit = {
snippet.contents = contents.toString()
contents = null
+ Closure doSubstitution = { String pattern, String subst ->
+ /*
+ * $body is really common but it looks like a
+ * backreference so we just escape it here to make the
+ * tests cleaner.
+ */
+ subst = subst.replace('$body', '\\$body')
+ // \n is a new line....
+ subst = subst.replace('\\n', '\n')
+ snippet.contents = snippet.contents.replaceAll(
+ pattern, subst)
+ }
+ defaultSubstitutions.each doSubstitution
if (substitutions != null) {
- substitutions.each { String pattern, String subst ->
- /*
- * $body is really common but it looks like a
- * backreference so we just escape it here to make the
- * tests cleaner.
- */
- subst = subst.replace('$body', '\\$body')
- // \n is a new line....
- subst = subst.replace('\\n', '\n')
- snippet.contents = snippet.contents.replaceAll(
- pattern, subst)
- }
+ substitutions.each doSubstitution
substitutions = null
}
+ if (snippet.language == null) {
+ throw new InvalidUserDataException("$snippet: "
+ + "Snippet missing a language. This is required by "
+ + "Elasticsearch's doc testing infrastructure so we "
+ + "be sure we don't accidentally forget to test a "
+ + "snippet.")
+ }
+ // Try to detect snippets that contain `curl`
+ if (snippet.language == 'sh' || snippet.language == 'shell') {
+ snippet.curl = snippet.contents.contains('curl')
+ if (snippet.console == false && snippet.curl == false) {
+ throw new InvalidUserDataException("$snippet: "
+ + "No need for NOTCONSOLE if snippet doesn't "
+ + "contain `curl`.")
+ }
+ }
perSnippet(snippet)
snippet = null
}
@@ -107,7 +133,7 @@ public class SnippetsTask extends DefaultTask {
}
return
}
- matcher = line =~ /\[source,(\w+)]\s*/
+ matcher = line =~ /\["?source"?,\s*"?(\w+)"?(,.*)?].*/
if (matcher.matches()) {
lastLanguage = matcher.group(1)
lastLanguageLine = lineNumber
@@ -196,8 +222,17 @@ public class SnippetsTask extends DefaultTask {
substitutions = []
}
String loc = "$file:$lineNumber"
- parse(loc, matcher.group(2), /$SUBSTITUTION ?/) {
- substitutions.add([it.group(1), it.group(2)])
+ parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$CAT) ?/) {
+ if (it.group(1) != null) {
+ // TESTRESPONSE[s/adsf/jkl/]
+ substitutions.add([it.group(1), it.group(2)])
+ } else if (it.group(3) != null) {
+ // TESTRESPONSE[_cat]
+ substitutions.add(['^', '/'])
+ substitutions.add(['\n$', '\\\\s*/'])
+ substitutions.add(['( +)', '$1\\\\s+'])
+ substitutions.add(['\n', '\\\\s*\n '])
+ }
}
}
return
@@ -250,6 +285,7 @@ public class SnippetsTask extends DefaultTask {
String language = null
String catchPart = null
String setup = null
+ boolean curl
List warnings = new ArrayList()
@Override
@@ -285,6 +321,9 @@ public class SnippetsTask extends DefaultTask {
if (testSetup) {
result += '// TESTSETUP'
}
+ if (curl) {
+ result += '(curl)'
+ }
return result
}
}
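The relaxed source-line regex now also accepts quoted names and trailing attributes. A quick Groovy check of both forms (illustrative only):

    def pattern = /\["?source"?,\s*"?(\w+)"?(,.*)?].*/
    assert '[source,js]' ==~ pattern
    def m = '["source","js",subs="attributes,callouts"]' =~ pattern
    assert m.matches() && m.group(1) == 'js'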
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
index b7b563bf15..c93ecb4094 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -56,12 +56,8 @@ public class PluginBuildPlugin extends BuildPlugin {
// for plugins which work with the transport client, we copy the jar
// file to a new name, copy the nebula generated pom to the same name,
// and generate a different pom for the zip
- project.signArchives.enabled = false
addClientJarPomGeneration(project)
addClientJarTask(project)
- if (isModule == false) {
- addZipPomGeneration(project)
- }
} else {
// no client plugin, so use the pom file from nebula, without jar, for the zip
project.ext.set("nebulaPublish.maven.jar", false)
@@ -97,8 +93,8 @@ public class PluginBuildPlugin extends BuildPlugin {
// with a full elasticsearch server that includes optional deps
provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
provided "com.vividsolutions:jts:${project.versions.jts}"
- provided "log4j:log4j:${project.versions.log4j}"
- provided "log4j:apache-log4j-extras:${project.versions.log4j}"
+ provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}"
+ provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}"
provided "net.java.dev.jna:jna:${project.versions.jna}"
}
}
@@ -152,7 +148,7 @@ public class PluginBuildPlugin extends BuildPlugin {
/** Adds a task to move jar and associated files to a "-client" name. */
protected static void addClientJarTask(Project project) {
Task clientJar = project.tasks.create('clientJar')
- clientJar.dependsOn('generatePomFileForJarPublication', project.jar, project.javadocJar, project.sourcesJar)
+ clientJar.dependsOn(project.jar, 'generatePomFileForClientJarPublication', project.javadocJar, project.sourcesJar)
clientJar.doFirst {
Path jarFile = project.jar.outputs.files.singleFile.toPath()
String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
@@ -179,7 +175,10 @@ public class PluginBuildPlugin extends BuildPlugin {
static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)
/** Find the reponame. */
- protected static String urlFromOrigin(String origin) {
+ static String urlFromOrigin(String origin) {
+ if (origin == null) {
+ return null // best effort, the url doesn't really matter, it is just required by maven central
+ }
if (origin.startsWith('https')) {
return origin
}
@@ -197,9 +196,9 @@ public class PluginBuildPlugin extends BuildPlugin {
project.publishing {
publications {
- jar(MavenPublication) {
+ clientJar(MavenPublication) {
from project.components.java
- artifactId = artifactId + '-client'
+ artifactId = project.pluginProperties.extension.name + '-client'
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
@@ -213,7 +212,7 @@ public class PluginBuildPlugin extends BuildPlugin {
}
}
- /** Adds a task to generate a*/
+ /** Adds a task to generate a pom file for the zip distribution. */
protected void addZipPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)
@@ -221,7 +220,18 @@ public class PluginBuildPlugin extends BuildPlugin {
publications {
zip(MavenPublication) {
artifact project.bundlePlugin
- pom.packaging = 'pom'
+ }
+ /* HUGE HACK: the underlying maven publication library refuses to deploy any attached artifacts
+ * when the packaging type is set to 'pom'. But Sonatype's OSS repositories require source files
+ * for artifacts that are of type 'zip'. We already publish the source and javadoc for Elasticsearch
+ * under the various other subprojects. So here we create another publication using the same
+ * name that has the "real" pom, and rely on the fact that gradle will execute the publish tasks
* in alphabetical order. This lets us publish the zip file even though the pom says the
* type is 'pom' instead of 'zip'. We cannot set up a dependency between the tasks because the
+ * publishing tasks are created *extremely* late in the configuration phase, so that we cannot get
+ * ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to
+ * maven local work, since we publish to maven central externally. */
+ zipReal(MavenPublication) {
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index a5e1e4c893..f451beeceb 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -147,6 +147,9 @@ class PrecommitTasks {
checkstyleTask.dependsOn(task)
task.dependsOn(copyCheckstyleConf)
task.inputs.file(checkstyleSuppressions)
+ task.reports {
+ html.enabled false
+ }
}
}
return checkstyleTask
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
index d45741a50b..48183a0772 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -20,8 +20,6 @@ package org.elasticsearch.gradle.test
import org.gradle.api.GradleException
import org.gradle.api.Project
-import org.gradle.api.artifacts.Configuration
-import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
/** Configuration for an elasticsearch cluster, used for integration tests. */
@@ -47,6 +45,17 @@ class ClusterConfiguration {
@Input
int transportPort = 0
+ /**
+ * An override of the data directory. This may only be used with a single node.
+ * The value is lazily evaluated at runtime as a String path.
+ */
+ @Input
+ Object dataDir = null
+
+ /** Optional override of the cluster name. */
+ @Input
+ String clusterName = null
+
@Input
boolean daemonize = true
@@ -54,16 +63,29 @@ class ClusterConfiguration {
boolean debug = false
@Input
- String jvmArgs = System.getProperty('tests.jvm.argline', '')
+ String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
+ " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
+ " " + System.getProperty('tests.jvm.argline', '')
/**
- * The seed nodes port file. In the case the cluster has more than one node we use a seed node
- * to form the cluster. The file is null if there is no seed node yet available.
+ * A closure to call which returns the unicast host to connect to for cluster formation.
*
- * Note: this can only be null if the cluster has only one node or if the first node is not yet
- * configured. All nodes but the first node should see a non null value.
+ * This allows multi-node clusters to form, or a new cluster to connect to an existing cluster.
+ * The closure takes two arguments, the NodeInfo for the first node in the cluster, and
+ * an AntBuilder which may be used to wait on conditions before returning.
*/
- File seedNodePortsFile
+ @Input
+ Closure unicastTransportUri = { NodeInfo seedNode, NodeInfo node, AntBuilder ant ->
+ if (seedNode == node) {
+ return null
+ }
+ ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
+ resourceexists {
+ file(file: seedNode.transportPortsFile.toString())
+ }
+ }
+ return seedNode.transportUri()
+ }
/**
* A closure to call before the cluster is considered ready. The closure is passed the node info,
@@ -73,7 +95,11 @@ class ClusterConfiguration {
@Input
Closure waitCondition = { NodeInfo node, AntBuilder ant ->
File tmpFile = new File(node.cwd, 'wait.success')
- ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
+ ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}")
+ // checking here for wait_for_nodes to be >= the number of nodes because it's possible
+ // this cluster is attempting to connect to nodes created by another task (same cluster name),
+ // so there will be more nodes in that case in the cluster state
+ ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}",
dest: tmpFile.toString(),
ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
retries: 10)
@@ -135,12 +161,4 @@ class ClusterConfiguration {
}
extraConfigFiles.put(path, sourceFile)
}
-
- /** Returns an address and port suitable for a uri to connect to this clusters seed node over transport protocol*/
- String seedNodeTransportUri() {
- if (seedNodePortsFile != null) {
- return seedNodePortsFile.readLines("UTF-8").get(0)
- }
- return null;
- }
}
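A sketch of how an integration test might use the new overrides, assuming the usual integTest { cluster { ... } } DSL (all values hypothetical):

    integTest {
        cluster {
            numNodes = 1
            dataDir = "${project.buildDir}/custom-data" // only valid with a single node
            clusterName = 'shared-cluster'
            // join an externally managed cluster instead of self-forming
            unicastTransportUri = { seedNode, node, ant -> '127.0.0.1:9300' }
        }
    }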
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index b1f0726501..957e845aa5 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -46,9 +46,9 @@ class ClusterFormationTasks {
/**
* Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
*
- * Returns a NodeInfo object for the first node in the cluster.
+ * Returns a list of NodeInfo objects for each node in the cluster.
*/
- static NodeInfo setup(Project project, Task task, ClusterConfiguration config) {
+ static List<NodeInfo> setup(Project project, Task task, ClusterConfiguration config) {
if (task.getEnabled() == false) {
// no need to add cluster formation tasks if the task won't run!
return
@@ -72,10 +72,9 @@ class ClusterFormationTasks {
throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
}
// this is our current version distribution configuration we use for all kinds of REST tests etc.
- project.configurations {
- elasticsearchDistro
- }
- configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchDistro, VersionProperties.elasticsearch)
+ String distroConfigName = "${task.name}_elasticsearchDistro"
+ Configuration distro = project.configurations.create(distroConfigName)
+ configureDistributionDependency(project, config.distribution, distro, VersionProperties.elasticsearch)
if (config.bwcVersion != null && config.numBwcNodes > 0) {
// if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version
// this version uses the same distribution etc. and only differs in the version we depend on.
@@ -91,28 +90,19 @@ class ClusterFormationTasks {
// we start N nodes and out of these N nodes there might be M bwc nodes.
// for each of those nodes we might have a different configuration
String elasticsearchVersion = VersionProperties.elasticsearch
- Configuration configuration = project.configurations.elasticsearchDistro
if (i < config.numBwcNodes) {
elasticsearchVersion = config.bwcVersion
- configuration = project.configurations.elasticsearchBwcDistro
+ distro = project.configurations.elasticsearchBwcDistro
}
NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
- if (i == 0) {
- if (config.seedNodePortsFile != null) {
- // we might allow this in the future to be set but for now we are the only authority to set this!
- throw new GradleException("seedNodePortsFile has a non-null value but first node has not been intialized")
- }
- config.seedNodePortsFile = node.transportPortsFile;
- }
nodes.add(node)
- startTasks.add(configureNode(project, task, cleanup, node, configuration))
+ startTasks.add(configureNode(project, task, cleanup, node, distro, nodes.get(0)))
}
Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
task.dependsOn(wait)
- // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
- return nodes[0]
+ return nodes
}
/** Adds a dependency on the given distribution */
@@ -143,7 +133,7 @@ class ClusterFormationTasks {
*
* @return a task which starts the node.
*/
- static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration) {
+ static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
// tasks are chained so their execution order is maintained
Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
@@ -156,8 +146,7 @@ class ClusterFormationTasks {
setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
- setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node)
- setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
+ setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode)
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
// install modules
@@ -172,6 +161,10 @@ class ClusterFormationTasks {
setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
}
+ // sets up any extra config files that need to be copied over to the ES instance;
+ // it's run after plugins have been installed, as the extra config files may belong to plugins
+ setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
+
// extra setup commands
for (Map.Entry<String, Object[]> command : node.config.setupCommands.entrySet()) {
// the first argument is the actual script name, relative to home
@@ -183,9 +176,10 @@ class ClusterFormationTasks {
Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node)
if (node.config.daemonize) {
- // if we are running in the background, make sure to stop the server when the task completes
Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node)
+ // if we are running in the background, make sure to stop the server when the task completes
task.finalizedBy(stop)
+ start.finalizedBy(stop)
}
return start
}
@@ -251,7 +245,7 @@ class ClusterFormationTasks {
}
/** Adds a task to write elasticsearch.yml for the given node configuration */
- static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
+ static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, NodeInfo seedNode) {
Map esConfig = [
'cluster.name' : node.clusterName,
'pidfile' : node.pidFile,
@@ -268,15 +262,9 @@ class ClusterFormationTasks {
Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
writeConfig.doFirst {
- if (node.nodeNum > 0) { // multi-node cluster case, we have to wait for the seed node to startup
- ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
- resourceexists {
- file(file: node.config.seedNodePortsFile.toString())
- }
- }
- // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast
- // host and join the cluster via that.
- esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\""
+ String unicastTransportUri = node.config.unicastTransportUri(seedNode, node, project.ant)
+ if (unicastTransportUri != null) {
+ esConfig['discovery.zen.ping.unicast.hosts'] = "\"${unicastTransportUri}\""
}
File configFile = new File(node.confDir, 'elasticsearch.yml')
logger.info("Configuring ${configFile}")
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
index 5d9961a042..85af2debf1 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
@@ -57,6 +57,9 @@ class NodeInfo {
/** config directory */
File confDir
+ /** data directory (as an Object, to allow lazy evaluation) */
+ Object dataDir
+
/** THE config file */
File configFile
@@ -95,11 +98,23 @@ class NodeInfo {
this.config = config
this.nodeNum = nodeNum
this.sharedDir = sharedDir
- clusterName = "${task.path.replace(':', '_').substring(1)}"
+ if (config.clusterName != null) {
+ clusterName = config.clusterName
+ } else {
+ clusterName = "${task.path.replace(':', '_').substring(1)}"
+ }
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
pidFile = new File(baseDir, 'es.pid')
homeDir = homeDir(baseDir, config.distribution, nodeVersion)
confDir = confDir(baseDir, config.distribution, nodeVersion)
+ if (config.dataDir != null) {
+ if (config.numNodes != 1) {
+ throw new IllegalArgumentException("Cannot set data dir for integ test with more than one node")
+ }
+ dataDir = config.dataDir
+ } else {
+ dataDir = new File(homeDir, "data")
+ }
configFile = new File(confDir, 'elasticsearch.yml')
// even for rpm/deb, the logs are under home because we don't start with real services
File logsDir = new File(homeDir, 'logs')
@@ -140,7 +155,7 @@ class NodeInfo {
}
}
env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
- args.addAll("-E", "path.conf=${confDir}")
+ args.addAll("-E", "path.conf=${confDir}", "-E", "path.data=${-> dataDir.toString()}")
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
args.add('"') // end the entire command, quoted
}
@@ -184,6 +199,19 @@ class NodeInfo {
return transportPortsFile.readLines("UTF-8").get(0)
}
+ /** Returns the file which contains the transport protocol ports for this node */
+ File getTransportPortsFile() {
+ return transportPortsFile
+ }
+
+ /** Returns the data directory for this node */
+ File getDataDir() {
+ if (!(dataDir instanceof File)) {
+ return new File(dataDir)
+ }
+ return dataDir
+ }
+
/** Returns the directory elasticsearch home is contained in for the given distribution */
static File homeDir(File baseDir, String distro, String nodeVersion) {
String path
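The `${-> ...}` form makes the GString lazy: the closure runs each time the string is rendered, so path.data picks up a dataDir that is assigned after the argument list is built. A standalone Groovy sketch (path assumed):

    def dataDir = null
    def arg = "path.data=${-> dataDir.toString()}" // closure runs at toString() time
    dataDir = new File('/tmp/node0/data')          // hypothetical path, set later
    assert arg.toString() == 'path.data=/tmp/node0/data'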
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
index c6463d2881..d50937408e 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
@@ -34,6 +34,9 @@ public class RestIntegTestTask extends RandomizedTestingTask {
ClusterConfiguration clusterConfig
+ /** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */
+ List<NodeInfo> nodes
+
/** Flag indicating whether the rest tests in the rest spec should be run. */
@Input
boolean includePackaged = false
@@ -52,6 +55,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
parallelism = '1'
include('**/*IT.class')
systemProperty('tests.rest.load_packaged', 'false')
+ systemProperty('tests.rest.cluster', "${-> nodes[0].httpUri()}")
+ systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
+ // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
+ // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
+ // both as separate sysprops
+ systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
// copy the rest spec/tests into the test resources
RestSpecHack.configureDependencies(project)
@@ -61,13 +70,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
// this must run after all projects have been configured, so we know any project
// references can be accessed as a fully configured project
project.gradle.projectsEvaluated {
- NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
- systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
- systemProperty('tests.config.dir', "${-> node.confDir}")
- // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
- // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
- // both as separate sysprops
- systemProperty('tests.cluster', "${-> node.transportUri()}")
+ nodes = ClusterFormationTasks.setup(project, this, clusterConfig)
}
}
@@ -88,6 +91,10 @@ public class RestIntegTestTask extends RandomizedTestingTask {
return clusterConfig
}
+ public List<NodeInfo> getNodes() {
+ return nodes
+ }
+
@Override
public Task dependsOn(Object... dependencies) {
super.dependsOn(dependencies)
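On the consuming side, a test would presumably read the properties set above along these lines (hypothetical sketch; the property names are the ones the task sets, the default URIs are placeholders):

```
public class ClusterSyspropsSketch {
    public static void main(String[] args) {
        // names match the sysprops RestIntegTestTask sets at runtime
        String httpUri = System.getProperty("tests.rest.cluster", "127.0.0.1:9200");
        String transportUri = System.getProperty("tests.cluster", "127.0.0.1:9300");
        String configDir = System.getProperty("tests.config.dir", "<unset>");
        System.out.println("REST target: " + httpUri);
        System.out.println("Transport target: " + transportUri);
        System.out.println("Config dir: " + configDir);
    }
}
```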
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestSpecHack.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestSpecHack.groovy
index 43b5c2f6f3..296ae71157 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestSpecHack.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestSpecHack.groovy
@@ -43,18 +43,22 @@ public class RestSpecHack {
}
/**
- * Creates a task to copy the rest spec files.
+ * Creates a task (if necessary) to copy the rest spec files.
*
* @param project The project to add the copy task to
* @param includePackagedTests true if the packaged tests should be copied, false otherwise
*/
public static Task configureTask(Project project, boolean includePackagedTests) {
+ Task copyRestSpec = project.tasks.findByName('copyRestSpec')
+ if (copyRestSpec != null) {
+ return copyRestSpec
+ }
Map copyRestSpecProps = [
name : 'copyRestSpec',
type : Copy,
dependsOn: [project.configurations.restSpec, 'processTestResources']
]
- Task copyRestSpec = project.tasks.create(copyRestSpecProps) {
+ copyRestSpec = project.tasks.create(copyRestSpecProps) {
from { project.zipTree(project.configurations.restSpec.singleFile) }
include 'rest-api-spec/api/**'
if (includePackagedTests) {
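The change above makes `configureTask` idempotent: look the task up by name first, create it only if missing. The same get-or-create idiom in plain Java, with a map standing in for Gradle's task container (hypothetical sketch):

```
import java.util.HashMap;
import java.util.Map;

public class GetOrCreateSketch {
    private static final Map<String, String> tasks = new HashMap<>();

    static String configureTask(String name) {
        // create the task only when absent, mirroring findByName + create above
        return tasks.computeIfAbsent(name, n -> "Copy task [" + n + "]");
    }

    public static void main(String[] args) {
        String first = configureTask("copyRestSpec");
        String second = configureTask("copyRestSpec");
        System.out.println(first == second); // true: the existing task is reused
    }
}
```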
diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index 9e1574987e..a995c201c4 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -10,6 +10,9 @@
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessLexer\.java" checks="." />
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
+ <!-- ThrowableProxy is a forked copy from Log4j to hack around a bug; this can be removed when the hack is removed -->
+ <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]logging[/\\]log4j[/\\]core[/\\]impl[/\\]ThrowableProxy.java" checks="RegexpSinglelineJava" />
+
<!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when the
files start to pass. -->
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQuery.java" checks="LineLength" />
@@ -21,7 +24,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]info[/\\]NodeInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]stats[/\\]NodesStatsRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]DeleteRepositoryRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]TransportDeleteRepositoryAction.java" checks="LineLength" />
@@ -236,7 +238,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]IncompatibleClusterStateVersionException.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]InternalClusterInfoService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]LocalNodeMasterListener.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeIndexDeletedAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeMappingRefreshAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]shard[/\\]ShardStateAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]block[/\\]ClusterBlock.java" checks="LineLength" />
@@ -248,8 +249,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MappingMetaData.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaData.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataCreateIndexService.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataDeleteIndexService.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexAliasesService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexStateService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexTemplateService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexUpgradeService.java" checks="LineLength" />
@@ -303,7 +302,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]recycler[/\\]Recyclers.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArrays.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CancellableThreads.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CollectionUtils.java" checks="LineLength" />
@@ -344,7 +342,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicyConfig.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLog.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisRegistry.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CommonGramsTokenFilterFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CustomAnalyzerProvider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]NumericDoubleAnalyzer.java" checks="LineLength" />
@@ -388,7 +385,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ParsedDocument.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDateFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDoubleFieldMapper.java" checks="LineLength" />
@@ -417,7 +413,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]RootObjectMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]merge[/\\]MergeStats.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryBuilder.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryValidationException.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]InnerHitsQueryParserHelper.java" checks="LineLength" />
@@ -460,7 +455,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveriesCollection.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryFailedException.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySettings.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySource.java" checks="LineLength" />
+ <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]PeerRecoverySourceService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]StartRecoveryRequest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]IndicesStore.java" checks="LineLength" />
@@ -471,9 +466,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]Repository.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoryModule.java" checks="LineLength" />
@@ -492,15 +485,12 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestPendingClusterTasksAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]AbstractScriptParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptModes.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptParameterParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptSettings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]Template.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />
@@ -559,29 +549,19 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]ValuesSourceParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueFormat.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueParser.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]builder[/\\]SearchSourceBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]controller[/\\]SearchPhaseController.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]AggregatedDfs.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]DfsSearchResult.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchPhase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSearchResult.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhase.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhaseParseElement.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]DefaultSearchContext.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]FilteredSearchContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]InternalSearchHit.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]SearchContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]ShardSearchTransportRequest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]FieldLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafDocLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafFieldsLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]QueryPhase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]QueryRescorer.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]RescoreParseElement.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]GeoDistanceSortParser.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]ScriptSortParser.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]SortParseElement.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestContextParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestUtils.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CompletionSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]CategoryContextMapping.java" checks="LineLength" />
@@ -590,9 +570,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoQueryContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]CandidateScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]NoisyChannelSpellChecker.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]PhraseSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]WordScorer.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]term[/\\]TermSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]RestoreService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardFailure.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />
@@ -629,7 +607,7 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />
+ <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]TransportActionFilterChainTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]WaitActiveShardCountIT.java" checks="LineLength" />
@@ -688,7 +666,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]AllocationPriorityTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]AwarenessAllocationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]BalanceConfigurationTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]CatAllocationTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ClusterRebalanceRoutingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ConcurrentRebalanceRoutingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]DeadNodesAllocationTests.java" checks="LineLength" />
@@ -723,7 +700,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]settings[/\\]ClusterSettingsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]structure[/\\]RoutingIteratorTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]codecs[/\\]CodecTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]BooleansTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreContainerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreTests.java" checks="LineLength" />
@@ -869,7 +845,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedFileIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedTranslogIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]DirectoryUtilsTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]ExceptionRetryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]StoreTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]suggest[/\\]stats[/\\]SuggestStatsIT.java" checks="LineLength" />
@@ -909,8 +884,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]template[/\\]SimpleIndexTemplateIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mget[/\\]SimpleMgetIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorServiceSettingsTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />
@@ -961,7 +934,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]TransportTwoNodesSearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ChildQuerySearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ParentFieldLoadingIT.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhasePluginIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoBoundingBoxIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoFilterIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoShapeQueryTests.java" checks="LineLength" />
@@ -998,9 +970,7 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESBlobStoreRepositoryIntegTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]geo[/\\]RandomShapeGenerator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchGeoAssertions.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPoolSerializationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]timestamp[/\\]SimpleTimestampIT.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ttl[/\\]SimpleTTLIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]update[/\\]UpdateIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]validate[/\\]SimpleValidateQueryIT.java" checks="LineLength" />
@@ -1017,7 +987,6 @@
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateShardResponse.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
- <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportPercolateAction.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
@@ -1045,14 +1014,6 @@
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptMultiThreadedTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonSecurityTests.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]AttachmentMapper.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]DateAttachmentMapperTests.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]EncryptedDocMapperTests.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]MetadataMapperTests.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]MultifieldAttachmentMapperTests.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]SimpleAttachmentMapperTests.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]StandaloneRunner.java" checks="LineLength" />
- <suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]VariousDocTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapper.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperUpgradeTests.java" checks="LineLength" />
@@ -1085,7 +1046,6 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]BackgroundIndexer.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CompositeTestCluster.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CorruptionUtils.java" checks="LineLength" />
- <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESAllocationTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESBackcompatTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESIntegTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESSingleNodeTestCase.java" checks="LineLength" />
@@ -1095,7 +1055,6 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]IndexSettingsModule.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalTestCluster.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]MockIndexEventListener.java" checks="LineLength" />
- <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]TestSearchContext.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]cluster[/\\]NoopClusterService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]cluster[/\\]TestClusterService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]discovery[/\\]ClusterDiscoveryConfiguration.java" checks="LineLength" />
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 334ee06df0..625d0b6635 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,12 +1,12 @@
-elasticsearch = 5.0.0-alpha6
-lucene = 6.1.0
+elasticsearch = 6.0.0-alpha1
+lucene = 6.2.0
# optional dependencies
spatial4j = 0.6
jts = 1.13
jackson = 2.8.1
snakeyaml = 1.15
-log4j = 1.2.17
+log4j = 2.6.2
slf4j = 1.6.2
jna = 4.2.2
@@ -20,4 +20,4 @@ commonscodec = 1.10
hamcrest = 1.3
securemock = 1.2
# benchmark dependencies
-jmh = 1.12
+jmh = 1.14
diff --git a/client/benchmark/README.md b/client/benchmark/README.md
index 96eac6bf51..06211b9d8f 100644
--- a/client/benchmark/README.md
+++ b/client/benchmark/README.md
@@ -1,34 +1,53 @@
-Steps to execute the benchmark:
+### Steps to execute the benchmark
-1. Start Elasticsearch on the target host (ideally *not* on the same machine)
-2. Create an empty index with the mapping you want to benchmark
-3. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.
-4. Delete the index
-5. Repeat steps 2. - 4. for multiple iterations. The first iterations are intended as warmup for Elasticsearch itself. Always start the same benchmark in step 3!
-4. After the benchmark: Shutdown Elasticsearch and delete the data directory
+1. Build `client-benchmark-noop-api-plugin` with `gradle :client:client-benchmark-noop-api-plugin:assemble`
+2. Install it on the target host with `bin/elasticsearch-plugin install file:///full/path/to/client-benchmark-noop-api-plugin.zip`
+3. Start Elasticsearch on the target host (ideally *not* on the same machine)
+4. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.
Repeat all steps above for the other benchmark candidate.
-Example benchmark:
+### Example benchmark
-* Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress
-* Use the mapping file https://github.com/elastic/rally-tracks/blob/master/geonames/mappings.json to create the index
+In general, you should define a few GC-related settings, such as `-Xms8192M -Xmx8192M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails`, and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.
-Example command line parameter list:
+#### Bulk indexing
+
+Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress it.
+
+Example command line parameters:
```
-rest 192.168.2.2 /home/your_user_name/.rally/benchmarks/data/geonames/documents.json geonames type 8647880 5000 "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }\""
+rest bulk 192.168.2.2 ./documents.json geonames type 8647880 5000
```
The parameters are in order:
* Client type: Use either "rest" or "transport"
+* Benchmark type: Use either "bulk" or "search"
* Benchmark target host IP (the host where Elasticsearch is running)
* full path to the file that should be bulk indexed
* name of the index
* name of the (sole) type in the index
* number of documents in the file
* bulk size
-* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
-
-You should also define a few GC-related settings `-Xms4096M -Xmx4096M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails` and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.
+
+
+#### Search
+
+Example command line parameters:
+
+```
+rest search 192.168.2.2 geonames "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }" 500,1000,1100,1200
+```
+
+The parameters are in order:
+
+* Client type: Use either "rest" or "transport"
+* Benchmark type: Use either "bulk" or "search"
+* Benchmark target host IP (the host where Elasticsearch is running)
+* name of the index
+* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
+* A comma-separated list of target throughput rates
+
+
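The final comma-separated parameter is split into individual target rates; a functionally equivalent sketch of the parsing done in `runSearchBenchmark` further down this diff (hypothetical class name):

```
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class ThroughputArgSketch {
    public static void main(String[] args) {
        String arg = "500,1000,1100,1200"; // the last parameter in the search example above
        List<Integer> rates = Arrays.stream(arg.split(","))
                .map(Integer::valueOf)
                .collect(Collectors.toList());
        System.out.println(rates); // [500, 1000, 1100, 1200]
    }
}
```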
diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle
index eb0480a92e..e17c7b8470 100644
--- a/client/benchmark/build.gradle
+++ b/client/benchmark/build.gradle
@@ -50,6 +50,8 @@ dependencies {
compile 'org.apache.commons:commons-math3:3.2'
compile("org.elasticsearch.client:rest:${version}")
+ // bottleneck should be the client, not Elasticsearch
+ compile project(path: ':client:client-benchmark-noop-api-plugin')
// for transport client
compile("org.elasticsearch:elasticsearch:${version}")
compile("org.elasticsearch.client:transport:${version}")
@@ -62,7 +64,3 @@ dependencies {
// No licenses for our benchmark deps (we don't ship benchmarks)
dependencyLicenses.enabled = false
-
-extraArchive {
- javadoc = false
-}
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/AbstractBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/AbstractBenchmark.java
index d4608c052e..23cb29563b 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/AbstractBenchmark.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/AbstractBenchmark.java
@@ -27,7 +27,11 @@ import org.elasticsearch.common.SuppressForbidden;
import java.io.Closeable;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
import java.util.List;
+import java.util.stream.Collectors;
public abstract class AbstractBenchmark<T extends Closeable> {
private static final int SEARCH_BENCHMARK_ITERATIONS = 10_000;
@@ -40,54 +44,113 @@ public abstract class AbstractBenchmark<T extends Closeable> {
@SuppressForbidden(reason = "system out is ok for a command line tool")
public final void run(String[] args) throws Exception {
- if (args.length < 6) {
+ if (args.length < 1) {
+ System.err.println("usage: [search|bulk]");
+ System.exit(1);
+ }
+ switch (args[0]) {
+ case "search":
+ runSearchBenchmark(args);
+ break;
+ case "bulk":
+ runBulkIndexBenchmark(args);
+ break;
+ default:
+ System.err.println("Unknown benchmark type [" + args[0] + "]");
+ System.exit(1);
+
+ }
+
+ }
+
+ @SuppressForbidden(reason = "system out is ok for a command line tool")
+ private void runBulkIndexBenchmark(String[] args) throws Exception {
+ if (args.length != 7) {
System.err.println(
- "usage: benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize [search request body]");
+ "usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize");
System.exit(1);
}
- String benchmarkTargetHost = args[0];
- String indexFilePath = args[1];
- String indexName = args[2];
- String typeName = args[3];
- int totalDocs = Integer.valueOf(args[4]);
- int bulkSize = Integer.valueOf(args[5]);
+ String benchmarkTargetHost = args[1];
+ String indexFilePath = args[2];
+ String indexName = args[3];
+ String typeName = args[4];
+ int totalDocs = Integer.valueOf(args[5]);
+ int bulkSize = Integer.valueOf(args[6]);
int totalIterationCount = (int) Math.floor(totalDocs / bulkSize);
// consider 40% of all iterations as warmup iterations
int warmupIterations = (int) (0.4d * totalIterationCount);
int iterations = totalIterationCount - warmupIterations;
- String searchBody = (args.length == 7) ? args[6] : null;
T client = client(benchmarkTargetHost);
BenchmarkRunner benchmark = new BenchmarkRunner(warmupIterations, iterations,
new BulkBenchmarkTask(
- bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations + iterations, bulkSize));
+ bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations, iterations, bulkSize));
+
+ try {
+ runTrials(() -> {
+ runGc();
+ benchmark.run();
+ });
+ } finally {
+ client.close();
+ }
+
+ }
+
+ @SuppressForbidden(reason = "system out is ok for a command line tool")
+ private void runSearchBenchmark(String[] args) throws Exception {
+ if (args.length != 5) {
+ System.err.println(
+ "usage: 'search' benchmarkTargetHostIp indexName searchRequestBody throughputRates");
+ System.exit(1);
+ }
+ String benchmarkTargetHost = args[1];
+ String indexName = args[2];
+ String searchBody = args[3];
+ List<Integer> throughputRates = Arrays.asList(args[4].split(",")).stream().map(Integer::valueOf).collect(Collectors.toList());
+
+ T client = client(benchmarkTargetHost);
try {
- benchmark.run();
- if (searchBody != null) {
- for (int run = 1; run <= 5; run++) {
- System.out.println("=============");
- System.out.println(" Trial run " + run);
- System.out.println("=============");
-
- for (int throughput = 100; throughput <= 100_000; throughput *= 10) {
- //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
- runGc();
- BenchmarkRunner searchBenchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
- new SearchBenchmarkTask(
- searchRequestExecutor(client, indexName), searchBody, 2 * SEARCH_BENCHMARK_ITERATIONS, throughput));
- System.out.printf("Target throughput = %d ops / s%n", throughput);
- searchBenchmark.run();
- }
+ runTrials(() -> {
+ for (int throughput : throughputRates) {
+ //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
+ runGc();
+ BenchmarkRunner benchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
+ new SearchBenchmarkTask(
+ searchRequestExecutor(client, indexName), searchBody, SEARCH_BENCHMARK_ITERATIONS,
+ SEARCH_BENCHMARK_ITERATIONS, throughput));
+ System.out.printf("Target throughput = %d ops / s%n", throughput);
+ benchmark.run();
}
- }
+ });
} finally {
client.close();
}
}
+ @SuppressForbidden(reason = "system out is ok for a command line tool")
+ private void runTrials(Runnable runner) {
+ int totalWarmupTrialRuns = 1;
+ for (int run = 1; run <= totalWarmupTrialRuns; run++) {
+ System.out.println("======================");
+ System.out.println(" Warmup trial run " + run + "/" + totalWarmupTrialRuns);
+ System.out.println("======================");
+ runner.run();
+ }
+
+ int totalTrialRuns = 5;
+ for (int run = 1; run <= totalTrialRuns; run++) {
+ System.out.println("================");
+ System.out.println(" Trial run " + run + "/" + totalTrialRuns);
+ System.out.println("================");
+
+ runner.run();
+ }
+ }
+
/**
* Requests a full GC and checks whether the GC did actually run after a request. It retries up to 5 times in case the GC did not
* run in time.
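To make the 40% warmup split in `runBulkIndexBenchmark` concrete, a worked example using the geonames figures from the README (assumed: 8,647,880 documents, bulk size 5,000; hypothetical class name):

```
public class WarmupSplitSketch {
    public static void main(String[] args) {
        int totalDocs = 8_647_880;
        int bulkSize = 5_000;
        int totalIterationCount = (int) Math.floor(totalDocs / bulkSize); // 1729; int division already floors
        int warmupIterations = (int) (0.4d * totalIterationCount);        // 691
        int iterations = totalIterationCount - warmupIterations;          // 1038 measured iterations
        System.out.printf("total=%d warmup=%d measured=%d%n",
                totalIterationCount, warmupIterations, iterations);
    }
}
```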
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkMain.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkMain.java
index 55e09fb438..317f0bf479 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkMain.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkMain.java
@@ -37,7 +37,7 @@ public class BenchmarkMain {
benchmark = new RestClientBenchmark();
break;
default:
- System.err.println("Unknown benchmark type [" + type + "]");
+ System.err.println("Unknown client type [" + type + "]");
System.exit(1);
}
benchmark.run(Arrays.copyOfRange(args, 1, args.length));
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkRunner.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkRunner.java
index 655b5815f3..dfb1984f4f 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkRunner.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkRunner.java
@@ -40,8 +40,8 @@ public final class BenchmarkRunner {
}
@SuppressForbidden(reason = "system out is ok for a command line tool")
- public void run() throws Exception {
- SampleRecorder recorder = new SampleRecorder(warmupIterations, iterations);
+ public void run() {
+ SampleRecorder recorder = new SampleRecorder(iterations);
System.out.printf("Running %s with %d warmup iterations and %d iterations.%n",
task.getClass().getSimpleName(), warmupIterations, iterations);
@@ -52,6 +52,8 @@ public final class BenchmarkRunner {
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
return;
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
}
List<Sample> samples = recorder.getSamples();
@@ -62,17 +64,24 @@ public final class BenchmarkRunner {
}
for (Metrics metrics : summaryMetrics) {
- System.out.printf(Locale.ROOT, "Operation: %s%n", metrics.operation);
- String stats = String.format(Locale.ROOT,
- "Throughput = %f ops/s, p90 = %f ms, p95 = %f ms, p99 = %f ms, p99.9 = %f ms, p99.99 = %f ms",
- metrics.throughput,
- metrics.serviceTimeP90, metrics.serviceTimeP95,
- metrics.serviceTimeP99, metrics.serviceTimeP999,
- metrics.serviceTimeP9999);
- System.out.println(repeat(stats.length(), '-'));
- System.out.println(stats);
+ String throughput = String.format(Locale.ROOT, "Throughput [ops/s]: %f", metrics.throughput);
+ String serviceTimes = String.format(Locale.ROOT,
+ "Service time [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
+ metrics.serviceTimeP50, metrics.serviceTimeP90, metrics.serviceTimeP95,
+ metrics.serviceTimeP99, metrics.serviceTimeP999, metrics.serviceTimeP9999);
+ String latencies = String.format(Locale.ROOT,
+ "Latency [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
+ metrics.latencyP50, metrics.latencyP90, metrics.latencyP95,
+ metrics.latencyP99, metrics.latencyP999, metrics.latencyP9999);
+
+ int lineLength = Math.max(serviceTimes.length(), latencies.length());
+
+ System.out.println(repeat(lineLength, '-'));
+ System.out.println(throughput);
+ System.out.println(serviceTimes);
+ System.out.println(latencies);
System.out.printf("success count = %d, error count = %d%n", metrics.successCount, metrics.errorCount);
- System.out.println(repeat(stats.length(), '-'));
+ System.out.println(repeat(lineLength, '-'));
}
}
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Metrics.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Metrics.java
index 9108afe444..e099c531db 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Metrics.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Metrics.java
@@ -23,23 +23,38 @@ public final class Metrics {
public final long successCount;
public final long errorCount;
public final double throughput;
+ public final double serviceTimeP50;
public final double serviceTimeP90;
public final double serviceTimeP95;
public final double serviceTimeP99;
public final double serviceTimeP999;
public final double serviceTimeP9999;
+ public final double latencyP50;
+ public final double latencyP90;
+ public final double latencyP95;
+ public final double latencyP99;
+ public final double latencyP999;
+ public final double latencyP9999;
public Metrics(String operation, long successCount, long errorCount, double throughput,
- double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
- double serviceTimeP999, double serviceTimeP9999) {
+ double serviceTimeP50, double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
+ double serviceTimeP999, double serviceTimeP9999, double latencyP50, double latencyP90,
+ double latencyP95, double latencyP99, double latencyP999, double latencyP9999) {
this.operation = operation;
this.successCount = successCount;
this.errorCount = errorCount;
this.throughput = throughput;
+ this.serviceTimeP50 = serviceTimeP50;
this.serviceTimeP90 = serviceTimeP90;
this.serviceTimeP95 = serviceTimeP95;
this.serviceTimeP99 = serviceTimeP99;
this.serviceTimeP999 = serviceTimeP999;
this.serviceTimeP9999 = serviceTimeP9999;
+ this.latencyP50 = latencyP50;
+ this.latencyP90 = latencyP90;
+ this.latencyP95 = latencyP95;
+ this.latencyP99 = latencyP99;
+ this.latencyP999 = latencyP999;
+ this.latencyP9999 = latencyP9999;
}
}
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/MetricsCalculator.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/MetricsCalculator.java
index 5b455127f5..a0be3d901d 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/MetricsCalculator.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/MetricsCalculator.java
@@ -50,13 +50,16 @@ public final class MetricsCalculator {
for (Map.Entry<String, List<Sample>> operationAndMetrics : samplesPerOperation.entrySet()) {
List<Sample> samples = operationAndMetrics.getValue();
double[] serviceTimes = new double[samples.size()];
+ double[] latencies = new double[samples.size()];
int it = 0;
long firstStart = Long.MAX_VALUE;
long latestEnd = Long.MIN_VALUE;
for (Sample sample : samples) {
firstStart = Math.min(sample.getStartTimestamp(), firstStart);
latestEnd = Math.max(sample.getStopTimestamp(), latestEnd);
- serviceTimes[it++] = sample.getServiceTime();
+ serviceTimes[it] = sample.getServiceTime();
+ latencies[it] = sample.getLatency();
+ it++;
}
metrics.add(new Metrics(operationAndMetrics.getKey(),
@@ -65,11 +68,18 @@ public final class MetricsCalculator {
// throughput calculation is based on the total (Wall clock) time it took to generate all samples
calculateThroughput(samples.size(), latestEnd - firstStart),
// convert ns -> ms without losing precision
+ StatUtils.percentile(serviceTimes, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
- StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
+ StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L),
+ StatUtils.percentile(latencies, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+ StatUtils.percentile(latencies, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+ StatUtils.percentile(latencies, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+ StatUtils.percentile(latencies, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+ StatUtils.percentile(latencies, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
+ StatUtils.percentile(latencies, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
}
return metrics;
}
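A minimal sketch of the percentile calculation and ns-to-ms conversion above (hypothetical class name; `StatUtils` comes from the commons-math3 dependency the benchmark build already declares):

```
import java.util.concurrent.TimeUnit;

import org.apache.commons.math3.stat.StatUtils;

public class PercentileSketch {
    public static void main(String[] args) {
        // service times recorded in nanoseconds, as in MetricsCalculator
        double[] serviceTimes = {1_000_000d, 2_000_000d, 4_000_000d, 8_000_000d};
        double p90 = StatUtils.percentile(serviceTimes, 90.0d);
        // dividing by 1e6 as doubles keeps sub-millisecond precision,
        // unlike TimeUnit.NANOSECONDS.toMillis which truncates to a long
        double p90Millis = p90 / TimeUnit.MILLISECONDS.toNanos(1L);
        System.out.printf("p90 = %f ms%n", p90Millis);
    }
}
```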
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Sample.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Sample.java
index 59cd6bfd10..114baa5533 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Sample.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/Sample.java
@@ -20,12 +20,14 @@ package org.elasticsearch.client.benchmark.metrics;
public final class Sample {
private final String operation;
+ private final long expectedStartTimestamp;
private final long startTimestamp;
private final long stopTimestamp;
private final boolean success;
- public Sample(String operation, long startTimestamp, long stopTimestamp, boolean success) {
+ public Sample(String operation, long expectedStartTimestamp, long startTimestamp, long stopTimestamp, boolean success) {
this.operation = operation;
+ this.expectedStartTimestamp = expectedStartTimestamp;
this.startTimestamp = startTimestamp;
this.stopTimestamp = stopTimestamp;
this.success = success;
@@ -48,7 +50,10 @@ public final class Sample {
}
public long getServiceTime() {
- // this is *not* latency, we're not including wait time in the queue (on purpose)
return stopTimestamp - startTimestamp;
}
+
+ public long getLatency() {
+ return stopTimestamp - expectedStartTimestamp;
+ }
}
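The new accessor makes the service-time/latency split explicit: service time covers only the operation itself, while latency is measured from the *scheduled* start and therefore includes any wait before the operation began. A small fragment using the class above, with made-up timestamps:

    // scheduled to start at t=0, actually started 400_000 ns late, finished at t=1_400_000 ns
    Sample sample = new Sample("search", 0L, 400_000L, 1_400_000L, true);
    long serviceTime = sample.getServiceTime(); // 1_000_000 ns: stop - start (work only)
    long latency = sample.getLatency();         // 1_400_000 ns: stop - expectedStart (work + wait)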
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/SampleRecorder.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/SampleRecorder.java
index d9f24aea00..63e1627f04 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/SampleRecorder.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/metrics/SampleRecorder.java
@@ -28,21 +28,14 @@ import java.util.List;
* This class is NOT threadsafe.
*/
public final class SampleRecorder {
- private final int warmupIterations;
private final List<Sample> samples;
- private int currentIteration;
- public SampleRecorder(int warmupIterations, int iterations) {
- this.warmupIterations = warmupIterations;
+ public SampleRecorder(int iterations) {
this.samples = new ArrayList<>(iterations);
}
public void addSample(Sample sample) {
- currentIteration++;
- // only add samples after warmup
- if (currentIteration > warmupIterations) {
- samples.add(sample);
- }
+ samples.add(sample);
}
public List<Sample> getSamples() {
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java
index 5844103fd1..214a75d12c 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java
@@ -18,13 +18,13 @@
*/
package org.elasticsearch.client.benchmark.ops.bulk;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.benchmark.BenchmarkTask;
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.io.BufferedReader;
@@ -43,15 +43,18 @@ import java.util.concurrent.TimeUnit;
public class BulkBenchmarkTask implements BenchmarkTask {
private final BulkRequestExecutor requestExecutor;
private final String indexFilePath;
- private final int totalIterations;
+ private final int warmupIterations;
+ private final int measurementIterations;
private final int bulkSize;
private LoadGenerator generator;
private ExecutorService executorService;
- public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int totalIterations, int bulkSize) {
+ public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int warmupIterations, int measurementIterations,
+ int bulkSize) {
this.requestExecutor = requestExecutor;
this.indexFilePath = indexFilePath;
- this.totalIterations = totalIterations;
+ this.warmupIterations = warmupIterations;
+ this.measurementIterations = measurementIterations;
this.bulkSize = bulkSize;
}
@@ -60,7 +63,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
public void setUp(SampleRecorder sampleRecorder) {
BlockingQueue<List<String>> bulkQueue = new ArrayBlockingQueue<>(256);
- BulkIndexer runner = new BulkIndexer(bulkQueue, totalIterations, sampleRecorder, requestExecutor);
+ BulkIndexer runner = new BulkIndexer(bulkQueue, warmupIterations, measurementIterations, sampleRecorder, requestExecutor);
executorService = Executors.newSingleThreadExecutor((r) -> new Thread(r, "bulk-index-runner"));
executorService.submit(runner);
@@ -132,24 +135,26 @@ public class BulkBenchmarkTask implements BenchmarkTask {
private static final class BulkIndexer implements Runnable {
- private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
+ private static final Logger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
private final BlockingQueue<List<String>> bulkData;
- private final int totalIterations;
+ private final int warmupIterations;
+ private final int measurementIterations;
private final BulkRequestExecutor bulkRequestExecutor;
private final SampleRecorder sampleRecorder;
- public BulkIndexer(BlockingQueue<List<String>> bulkData, int totalIterations, SampleRecorder sampleRecorder,
- BulkRequestExecutor bulkRequestExecutor) {
+ public BulkIndexer(BlockingQueue<List<String>> bulkData, int warmupIterations, int measurementIterations,
+ SampleRecorder sampleRecorder, BulkRequestExecutor bulkRequestExecutor) {
this.bulkData = bulkData;
- this.totalIterations = totalIterations;
+ this.warmupIterations = warmupIterations;
+ this.measurementIterations = measurementIterations;
this.bulkRequestExecutor = bulkRequestExecutor;
this.sampleRecorder = sampleRecorder;
}
@Override
public void run() {
- for (int iteration = 0; iteration < totalIterations; iteration++) {
+ for (int iteration = 0; iteration < warmupIterations + measurementIterations; iteration++) {
boolean success = false;
List<String> currentBulk;
try {
@@ -158,8 +163,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
Thread.currentThread().interrupt();
return;
}
- // Yes, this approach is prone to coordinated omission *but* we have to consider that we want to benchmark a closed system
- // with backpressure here instead of an open system. So this is actually correct in this case.
+ // measure only service time; latency is not meaningful for a closed-system throughput benchmark with backpressure
long start = System.nanoTime();
try {
success = bulkRequestExecutor.bulkIndex(currentBulk);
@@ -167,7 +171,9 @@ public class BulkBenchmarkTask implements BenchmarkTask {
logger.warn("Error while executing bulk request", ex);
}
long stop = System.nanoTime();
- sampleRecorder.addSample(new Sample("bulk", start, stop, success));
+ if (iteration >= warmupIterations) { // record samples only after warmup
+ sampleRecorder.addSample(new Sample("bulk", start, start, stop, success));
+ }
}
}
}
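Since SampleRecorder no longer drops warmup samples itself, each task now gates recording on the iteration index, as the fixed condition above does. A minimal sketch of that split, with illustrative iteration counts:

    public final class WarmupSplitDemo {
        public static void main(String[] args) {
            int warmupIterations = 100;        // illustrative
            int measurementIterations = 1_000; // illustrative
            int recorded = 0;
            for (int iteration = 0; iteration < warmupIterations + measurementIterations; iteration++) {
                if (iteration >= warmupIterations) {
                    recorded++; // only measurement iterations produce samples
                }
            }
            System.out.println(recorded); // 1000
        }
    }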
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/search/SearchBenchmarkTask.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/search/SearchBenchmarkTask.java
index a71221610b..4f370a520a 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/search/SearchBenchmarkTask.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/search/SearchBenchmarkTask.java
@@ -25,20 +25,20 @@ import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import java.util.concurrent.TimeUnit;
public class SearchBenchmarkTask implements BenchmarkTask {
- private static final long MICROS_PER_SEC = TimeUnit.SECONDS.toMicros(1L);
- private static final long NANOS_PER_MICRO = TimeUnit.MICROSECONDS.toNanos(1L);
-
private final SearchRequestExecutor searchRequestExecutor;
private final String searchRequestBody;
- private final int iterations;
+ private final int warmupIterations;
+ private final int measurementIterations;
private final int targetThroughput;
private SampleRecorder sampleRecorder;
- public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int iterations, int targetThroughput) {
+ public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int warmupIterations,
+ int measurementIterations, int targetThroughput) {
this.searchRequestExecutor = searchRequestExecutor;
this.searchRequestBody = body;
- this.iterations = iterations;
+ this.warmupIterations = warmupIterations;
+ this.measurementIterations = measurementIterations;
this.targetThroughput = targetThroughput;
}
@@ -49,28 +49,25 @@ public class SearchBenchmarkTask implements BenchmarkTask {
@Override
public void run() throws Exception {
- for (int iteration = 0; iteration < this.iterations; iteration++) {
- final long start = System.nanoTime();
- boolean success = searchRequestExecutor.search(searchRequestBody);
- final long stop = System.nanoTime();
- sampleRecorder.addSample(new Sample("search", start, stop, success));
-
- int waitTime = (int) Math.floor(MICROS_PER_SEC / targetThroughput - (stop - start) / NANOS_PER_MICRO);
- if (waitTime > 0) {
- waitMicros(waitTime);
- }
- }
+ runIterations(warmupIterations, false);
+ runIterations(measurementIterations, true);
}
- private void waitMicros(int waitTime) throws InterruptedException {
- // Thread.sleep() time is not very accurate (it's most of the time around 1 - 2 ms off)
- // we busy spin all the time to avoid introducing additional measurement artifacts (noticed 100% skew on 99.9th percentile)
- // this approach is not suitable for low throughput rates (in the second range) though
- if (waitTime > 0) {
- long end = System.nanoTime() + 1000L * waitTime;
- while (end > System.nanoTime()) {
+ private void runIterations(int iterations, boolean addSample) {
+ long interval = TimeUnit.SECONDS.toNanos(1L) / targetThroughput;
+
+ long totalStart = System.nanoTime();
+ for (int iteration = 0; iteration < iterations; iteration++) {
+ long expectedStart = totalStart + iteration * interval;
+ while (System.nanoTime() < expectedStart) {
// busy spin
}
+ long start = System.nanoTime();
+ boolean success = searchRequestExecutor.search(searchRequestBody);
+ long stop = System.nanoTime();
+ if (addSample) {
+ sampleRecorder.addSample(new Sample("search", expectedStart, start, stop, success));
+ }
}
}
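The rewritten loop paces requests against a precomputed schedule instead of sleeping for the leftover interval, which is what makes the latency metric meaningful: when one request overruns, the next expectedStart is already in the past, the busy-spin falls through immediately, and the backlog shows up in getLatency() while getServiceTime() stays unaffected. A self-contained sketch of the same arithmetic (the target throughput is a made-up value):

    import java.util.concurrent.TimeUnit;

    public final class PacingDemo {
        public static void main(String[] args) {
            int targetThroughput = 1_000; // ops/s, illustrative
            long interval = TimeUnit.SECONDS.toNanos(1L) / targetThroughput; // 1_000_000 ns
            long totalStart = System.nanoTime();
            for (int iteration = 0; iteration < 5; iteration++) {
                long expectedStart = totalStart + iteration * interval;
                while (System.nanoTime() < expectedStart) {
                    // busy spin, as in the benchmark task above
                }
                long start = System.nanoTime();
                // the search request would execute here
                long stop = System.nanoTime();
                System.out.printf("iteration %d: latency %d ns, service time %d ns%n",
                    iteration, stop - expectedStart, stop - start);
            }
        }
    }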
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java
index bf661fa661..b342d93fba 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java
@@ -19,14 +19,20 @@
package org.elasticsearch.client.benchmark.rest;
import org.apache.http.HttpEntity;
+import org.apache.http.HttpHeaders;
import org.apache.http.HttpHost;
import org.apache.http.HttpStatus;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
+import org.apache.http.message.BasicHeader;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.benchmark.AbstractBenchmark;
import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;
@@ -45,7 +51,12 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
@Override
protected RestClient client(String benchmarkTargetHost) {
- return RestClient.builder(new HttpHost(benchmarkTargetHost, 9200)).build();
+ return RestClient
+ .builder(new HttpHost(benchmarkTargetHost, 9200))
+ .setHttpClientConfigCallback(b -> b.setDefaultHeaders(
+ Collections.singleton(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "gzip"))))
+ .setRequestConfigCallback(b -> b.setContentCompressionEnabled(true))
+ .build();
}
@Override
@@ -77,7 +88,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
}
HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON);
try {
- Response response = client.performRequest("POST", "/geonames/type/_bulk", Collections.emptyMap(), entity);
+ Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity);
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
} catch (Exception e) {
throw new ElasticsearchException(e);
@@ -91,7 +102,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
private RestSearchRequestExecutor(RestClient client, String indexName) {
this.client = client;
- this.endpoint = "/" + indexName + "/_search";
+ this.endpoint = "/" + indexName + "/_noop_search";
}
@Override
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java
index c52414cf3a..c38234ef30 100644
--- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java
+++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java
@@ -19,7 +19,6 @@
package org.elasticsearch.client.benchmark.transport;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchResponse;
@@ -30,6 +29,11 @@ import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugin.noop.NoopPlugin;
+import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
+import org.elasticsearch.plugin.noop.action.bulk.NoopBulkRequestBuilder;
+import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
+import org.elasticsearch.plugin.noop.action.search.NoopSearchRequestBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
@@ -46,7 +50,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
@Override
protected TransportClient client(String benchmarkTargetHost) throws Exception {
- TransportClient client = new PreBuiltTransportClient(Settings.EMPTY);
+ TransportClient client = new PreBuiltTransportClient(Settings.EMPTY, NoopPlugin.class);
client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(benchmarkTargetHost), 9300));
return client;
}
@@ -74,7 +78,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
@Override
public boolean bulkIndex(List<String> bulkData) {
- BulkRequestBuilder builder = client.prepareBulk();
+ NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client);
for (String bulkItem : bulkData) {
builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8)));
}
@@ -103,8 +107,11 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
@Override
public boolean search(String source) {
final SearchResponse response;
+ NoopSearchRequestBuilder builder = NoopSearchAction.INSTANCE.newRequestBuilder(client);
try {
- response = client.prepareSearch(indexName).setQuery(QueryBuilders.wrapperQuery(source)).execute().get();
+ builder.setIndices(indexName);
+ builder.setQuery(QueryBuilders.wrapperQuery(source));
+ response = client.execute(NoopSearchAction.INSTANCE, builder.request()).get();
return response.status() == RestStatus.OK;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
diff --git a/client/benchmark/src/main/resources/log4j.properties b/client/benchmark/src/main/resources/log4j.properties
deleted file mode 100644
index 22f54ef68e..0000000000
--- a/client/benchmark/src/main/resources/log4j.properties
+++ /dev/null
@@ -1,9 +0,0 @@
-es.logger.level=INFO
-log4j.rootLogger=${es.logger.level}, out
-
-log4j.logger.org.apache.http=INFO, out
-log4j.additivity.org.apache.http=false
-
-log4j.appender.out=org.apache.log4j.ConsoleAppender
-log4j.appender.out.layout=org.apache.log4j.PatternLayout
-log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
diff --git a/client/benchmark/src/main/resources/log4j2.properties b/client/benchmark/src/main/resources/log4j2.properties
new file mode 100644
index 0000000000..8652131bf4
--- /dev/null
+++ b/client/benchmark/src/main/resources/log4j2.properties
@@ -0,0 +1,7 @@
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
diff --git a/client/client-benchmark-noop-api-plugin/README.md b/client/client-benchmark-noop-api-plugin/README.md
new file mode 100644
index 0000000000..f9f7128231
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/README.md
@@ -0,0 +1,23 @@
+### Purpose
+
+This plugin provides empty REST and transport endpoints for bulk indexing and search. It is used to avoid the server side accidentally becoming the bottleneck in client-side benchmarks.
+
+### Build Instructions
+
+Build the plugin with `gradle :client:client-benchmark-noop-api-plugin:assemble` from the Elasticsearch root project directory.
+
+### Installation Instructions
+
+After the binary has been built, install it with `bin/elasticsearch-plugin install file:///full/path/to/noop-plugin.zip`.
+
+### Usage
+
+The plugin provides two REST endpoints:
+
+* `/_noop_bulk` and all variations that the bulk endpoint provides (except that all no-op endpoints are called `_noop_bulk` instead of `_bulk`)
+* `/_noop_search` and all variations that the search endpoint provides (except that all no-op endpoints are called `_noop_search` instead of `_search`)
+
+The corresponding transport actions are:
+
+* `org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction`
+* `org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction` \ No newline at end of file
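A hedged usage sketch against the REST endpoints (assumes a local node with this plugin installed; the index name and query are made up, and the call shape mirrors the low-level RestClient usage elsewhere in this diff):

    import java.util.Collections;
    import org.apache.http.HttpHost;
    import org.apache.http.entity.ContentType;
    import org.apache.http.nio.entity.NStringEntity;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public final class NoopEndpointDemo {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
                NStringEntity query = new NStringEntity("{\"query\":{\"match_all\":{}}}", ContentType.APPLICATION_JSON);
                // identical to a normal search, except the path says _noop_search
                Response response = client.performRequest("POST", "/geonames/_noop_search", Collections.emptyMap(), query);
                System.out.println(response.getStatusLine());
            }
        }
    }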
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java b/client/client-benchmark-noop-api-plugin/build.gradle
index f42110c1e6..a0d52f1591 100644
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java
+++ b/client/client-benchmark-noop-api-plugin/build.gradle
@@ -1,5 +1,3 @@
-package org.elasticsearch.mapper.attachments;
-
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@@ -19,12 +17,19 @@ package org.elasticsearch.mapper.attachments;
* under the License.
*/
-import org.elasticsearch.test.ESTestCase;
+group = 'org.elasticsearch.plugin'
-public class TikaImplTests extends ESTestCase {
-
- public void testTikaLoads() throws Exception {
- Class.forName("org.elasticsearch.mapper.attachments.TikaImpl");
- }
+apply plugin: 'elasticsearch.esplugin'
+esplugin {
+ name 'client-benchmark-noop-api'
+ description 'Stubbed out Elasticsearch actions that can be used for client-side benchmarking'
+ classname 'org.elasticsearch.plugin.noop.NoopPlugin'
}
+
+compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+
+// no unit tests
+test.enabled = false
+integTest.enabled = false
+
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java
new file mode 100644
index 0000000000..343d3cf613
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop;
+
+import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
+import org.elasticsearch.plugin.noop.action.bulk.RestNoopBulkAction;
+import org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
+import org.elasticsearch.plugin.noop.action.search.RestNoopSearchAction;
+import org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction;
+import org.elasticsearch.plugins.ActionPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.rest.RestHandler;
+
+import java.util.Arrays;
+import java.util.List;
+
+public class NoopPlugin extends Plugin implements ActionPlugin {
+ @Override
+ public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
+ return Arrays.asList(
+ new ActionHandler<>(NoopBulkAction.INSTANCE, TransportNoopBulkAction.class),
+ new ActionHandler<>(NoopSearchAction.INSTANCE, TransportNoopSearchAction.class)
+ );
+ }
+
+ @Override
+ public List<Class<? extends RestHandler>> getRestHandlers() {
+ return Arrays.asList(RestNoopBulkAction.class, RestNoopSearchAction.class);
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java
index e967ad9d79..7f5ec6edd8 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java
@@ -16,29 +16,29 @@
* specific language governing permissions and limitations
* under the License.
*/
+package org.elasticsearch.plugin.noop.action.bulk;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.ElasticsearchClient;
-package org.elasticsearch.common.logging;
+public class NoopBulkAction extends Action<BulkRequest, BulkResponse, NoopBulkRequestBuilder> {
+ public static final String NAME = "mock:data/write/bulk";
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.spi.LoggingEvent;
-import org.elasticsearch.cli.Terminal;
+ public static final NoopBulkAction INSTANCE = new NoopBulkAction();
-/**
- * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginCli.
- * */
-public class TerminalAppender extends AppenderSkeleton {
- @Override
- protected void append(LoggingEvent event) {
- Terminal.DEFAULT.println(event.getRenderedMessage());
+ private NoopBulkAction() {
+ super(NAME);
}
@Override
- public void close() {
+ public NoopBulkRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+ return new NoopBulkRequestBuilder(client, this);
}
@Override
- public boolean requiresLayout() {
- return false;
+ public BulkResponse newResponse() {
+ return new BulkResponse(null, 0);
}
}
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java
new file mode 100644
index 0000000000..ceaf9f8cc9
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop.action.bulk;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteRequestBuilder;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.WriteRequestBuilder;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.TimeValue;
+
+public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, NoopBulkRequestBuilder>
+ implements WriteRequestBuilder<NoopBulkRequestBuilder> {
+
+ public NoopBulkRequestBuilder(ElasticsearchClient client, NoopBulkAction action) {
+ super(client, action, new BulkRequest());
+ }
+
+ /**
+ * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
+ * (for example, if no id is provided one will be generated, and the create flag is honored).
+ */
+ public NoopBulkRequestBuilder add(IndexRequest request) {
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+ * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
+ * (for example, if no id is provided one will be generated, and the create flag is honored).
+ */
+ public NoopBulkRequestBuilder add(IndexRequestBuilder request) {
+ super.request.add(request.request());
+ return this;
+ }
+
+ /**
+ * Adds an {@link DeleteRequest} to the list of actions to execute.
+ */
+ public NoopBulkRequestBuilder add(DeleteRequest request) {
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+ * Adds an {@link DeleteRequest} to the list of actions to execute.
+ */
+ public NoopBulkRequestBuilder add(DeleteRequestBuilder request) {
+ super.request.add(request.request());
+ return this;
+ }
+
+
+ /**
+ * Adds an {@link UpdateRequest} to the list of actions to execute.
+ */
+ public NoopBulkRequestBuilder add(UpdateRequest request) {
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+ * Adds an {@link UpdateRequest} to the list of actions to execute.
+ */
+ public NoopBulkRequestBuilder add(UpdateRequestBuilder request) {
+ super.request.add(request.request());
+ return this;
+ }
+
+ /**
+ * Adds framed data in binary format.
+ */
+ public NoopBulkRequestBuilder add(byte[] data, int from, int length) throws Exception {
+ request.add(data, from, length, null, null);
+ return this;
+ }
+
+ /**
+ * Adds framed data in binary format.
+ */
+ public NoopBulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType)
+ throws Exception {
+ request.add(data, from, length, defaultIndex, defaultType);
+ return this;
+ }
+
+ /**
+ * Sets the number of shard copies that must be active before proceeding with the write.
+ * See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
+ */
+ public NoopBulkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
+ request.waitForActiveShards(waitForActiveShards);
+ return this;
+ }
+
+ /**
+ * A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
+ * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
+ * to get the ActiveShardCount.
+ */
+ public NoopBulkRequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
+ return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final NoopBulkRequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ /**
+ * The number of actions currently in the bulk.
+ */
+ public int numberOfActions() {
+ return request.numberOfActions();
+ }
+}
+
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
new file mode 100644
index 0000000000..e63d1e4452
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop.action.bulk;
+
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.DocumentRequest;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkShardRequest;
+import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.action.RestBuilderListener;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+public class RestNoopBulkAction extends BaseRestHandler {
+ @Inject
+ public RestNoopBulkAction(Settings settings, RestController controller) {
+ super(settings);
+
+ controller.registerHandler(POST, "/_noop_bulk", this);
+ controller.registerHandler(PUT, "/_noop_bulk", this);
+ controller.registerHandler(POST, "/{index}/_noop_bulk", this);
+ controller.registerHandler(PUT, "/{index}/_noop_bulk", this);
+ controller.registerHandler(POST, "/{index}/{type}/_noop_bulk", this);
+ controller.registerHandler(PUT, "/{index}/{type}/_noop_bulk", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+ BulkRequest bulkRequest = Requests.bulkRequest();
+ String defaultIndex = request.param("index");
+ String defaultType = request.param("type");
+ String defaultRouting = request.param("routing");
+ String fieldsParam = request.param("fields");
+ String defaultPipeline = request.param("pipeline");
+ String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;
+
+ String waitForActiveShards = request.param("wait_for_active_shards");
+ if (waitForActiveShards != null) {
+ bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
+ }
+ bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
+ bulkRequest.setRefreshPolicy(request.param("refresh"));
+ bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, null, defaultPipeline, null, true);
+
+ // short circuit the call to the transport layer
+ BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);
+ listener.onResponse(bulkRequest);
+
+ }
+
+ private static class BulkRestBuilderListener extends RestBuilderListener<BulkRequest> {
+ private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE,
+ new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));
+
+ private final RestRequest request;
+
+
+ public BulkRestBuilderListener(RestChannel channel, RestRequest request) {
+ super(channel);
+ this.request = request;
+ }
+
+ @Override
+ public RestResponse buildResponse(BulkRequest bulkRequest, XContentBuilder builder) throws Exception {
+ builder.startObject();
+ builder.field(Fields.TOOK, 0);
+ builder.field(Fields.ERRORS, false);
+ builder.startArray(Fields.ITEMS);
+ for (int idx = 0; idx < bulkRequest.numberOfActions(); idx++) {
+ builder.startObject();
+ ITEM_RESPONSE.toXContent(builder, request);
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return new BytesRestResponse(OK, builder);
+ }
+ }
+
+ static final class Fields {
+ static final String ITEMS = "items";
+ static final String ERRORS = "errors";
+ static final String TOOK = "took";
+ }
+}
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java
new file mode 100644
index 0000000000..931e672446
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop.action.bulk;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.DocumentRequest;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
+ private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE,
+ new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));
+
+ @Inject
+ public TransportNoopBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
+ ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+ super(settings, NoopBulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
+ }
+
+ @Override
+ protected void doExecute(BulkRequest request, ActionListener<BulkResponse> listener) {
+ final int itemCount = request.subRequests().size();
+ // simulate at least a realistic amount of data that gets serialized
+ BulkItemResponse[] bulkItemResponses = new BulkItemResponse[itemCount];
+ for (int idx = 0; idx < itemCount; idx++) {
+ bulkItemResponses[idx] = ITEM_RESPONSE;
+ }
+ listener.onResponse(new BulkResponse(bulkItemResponses, 0));
+ }
+}
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java
new file mode 100644
index 0000000000..b24190b694
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop.action.search;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.ElasticsearchClient;
+
+public class NoopSearchAction extends Action<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
+ public static final NoopSearchAction INSTANCE = new NoopSearchAction();
+ public static final String NAME = "mock:data/read/search";
+
+ public NoopSearchAction() {
+ super(NAME);
+ }
+
+ @Override
+ public NoopSearchRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+ return new NoopSearchRequestBuilder(client, this);
+ }
+
+ @Override
+ public SearchResponse newResponse() {
+ return new SearchResponse();
+ }
+}
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java
new file mode 100644
index 0000000000..f40941a602
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java
@@ -0,0 +1,496 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop.action.search;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
+import org.elasticsearch.search.rescore.RescoreBuilder;
+import org.elasticsearch.search.slice.SliceBuilder;
+import org.elasticsearch.search.sort.SortBuilder;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+
+import java.util.Arrays;
+import java.util.List;
+
+public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
+
+ public NoopSearchRequestBuilder(ElasticsearchClient client, NoopSearchAction action) {
+ super(client, action, new SearchRequest());
+ }
+
+ /**
+ * Sets the indices the search will be executed on.
+ */
+ public NoopSearchRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * The document types to execute the search against. Defaults to be executed against
+ * all types.
+ */
+ public NoopSearchRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+ * The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
+ */
+ public NoopSearchRequestBuilder setSearchType(SearchType searchType) {
+ request.searchType(searchType);
+ return this;
+ }
+
+ /**
+ * A string representation of the search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}. Can be
+ * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
+ * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
+ */
+ public NoopSearchRequestBuilder setSearchType(String searchType) {
+ request.searchType(searchType);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public NoopSearchRequestBuilder setScroll(Scroll scroll) {
+ request.scroll(scroll);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public NoopSearchRequestBuilder setScroll(TimeValue keepAlive) {
+ request.scroll(keepAlive);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public NoopSearchRequestBuilder setScroll(String keepAlive) {
+ request.scroll(keepAlive);
+ return this;
+ }
+
+ /**
+ * An optional timeout to control how long search is allowed to take.
+ */
+ public NoopSearchRequestBuilder setTimeout(TimeValue timeout) {
+ sourceBuilder().timeout(timeout);
+ return this;
+ }
+
+ /**
+ * An optional document count; once that many documents have been
+ * collected, the search query terminates early.
+ */
+ public NoopSearchRequestBuilder setTerminateAfter(int terminateAfter) {
+ sourceBuilder().terminateAfter(terminateAfter);
+ return this;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public NoopSearchRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public NoopSearchRequestBuilder setRouting(String... routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public NoopSearchRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+ * Specifies which types of unavailable requested indices to ignore (for example, indices
+ * that don't exist) and how wildcard indices expressions are expanded.
+ */
+ public NoopSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request().indicesOptions(indicesOptions);
+ return this;
+ }
+
+ /**
+ * Constructs a new search source builder with a search query.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public NoopSearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
+ sourceBuilder().query(queryBuilder);
+ return this;
+ }
+
+ /**
+ * Sets a filter that will be executed after the query has been executed and only affects the search hits
+ * (not aggregations). This filter is always executed as the last filtering mechanism.
+ */
+ public NoopSearchRequestBuilder setPostFilter(QueryBuilder postFilter) {
+ sourceBuilder().postFilter(postFilter);
+ return this;
+ }
+
+ /**
+ * Sets the minimum score below which docs will be filtered out.
+ */
+ public NoopSearchRequestBuilder setMinScore(float minScore) {
+ sourceBuilder().minScore(minScore);
+ return this;
+ }
+
+ /**
+ * From index to start the search from. Defaults to <tt>0</tt>.
+ */
+ public NoopSearchRequestBuilder setFrom(int from) {
+ sourceBuilder().from(from);
+ return this;
+ }
+
+ /**
+ * The number of search hits to return. Defaults to <tt>10</tt>.
+ */
+ public NoopSearchRequestBuilder setSize(int size) {
+ sourceBuilder().size(size);
+ return this;
+ }
+
+ /**
+ * Should each {@link org.elasticsearch.search.SearchHit} be returned with an
+ * explanation of the hit (ranking).
+ */
+ public NoopSearchRequestBuilder setExplain(boolean explain) {
+ sourceBuilder().explain(explain);
+ return this;
+ }
+
+ /**
+ * Should each {@link org.elasticsearch.search.SearchHit} be returned with its
+ * version.
+ */
+ public NoopSearchRequestBuilder setVersion(boolean version) {
+ sourceBuilder().version(version);
+ return this;
+ }
+
+ /**
+ * Sets the boost a specific index will receive when the query is executed against it.
+ *
+ * @param index The index to apply the boost against
+ * @param indexBoost The boost to apply to the index
+ */
+ public NoopSearchRequestBuilder addIndexBoost(String index, float indexBoost) {
+ sourceBuilder().indexBoost(index, indexBoost);
+ return this;
+ }
+
+ /**
+ * The stats groups this request will be aggregated under.
+ */
+ public NoopSearchRequestBuilder setStats(String... statsGroups) {
+ sourceBuilder().stats(Arrays.asList(statsGroups));
+ return this;
+ }
+
+ /**
+ * The stats groups this request will be aggregated under.
+ */
+ public NoopSearchRequestBuilder setStats(List<String> statsGroups) {
+ sourceBuilder().stats(statsGroups);
+ return this;
+ }
+
+ /**
+ * Indicates whether the response should contain the stored _source for every hit
+ */
+ public NoopSearchRequestBuilder setFetchSource(boolean fetch) {
+ sourceBuilder().fetchSource(fetch);
+ return this;
+ }
+
+ /**
+ * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param include An optional include (optionally wildcarded) pattern to filter the returned _source
+ * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
+ */
+ public NoopSearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
+ sourceBuilder().fetchSource(include, exclude);
+ return this;
+ }
+
+ /**
+ * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param includes An optional list of include (optionally wildcarded) patterns to filter the returned _source
+ * @param excludes An optional list of exclude (optionally wildcarded) patterns to filter the returned _source
+ */
+ public NoopSearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+ sourceBuilder().fetchSource(includes, excludes);
+ return this;
+ }
+
+ /**
+ * Adds a docvalue based field to load and return. The field does not have to be stored,
+ * but it's recommended to use non-analyzed or numeric fields.
+ *
+ * @param name The field to get from the docvalue
+ */
+ public NoopSearchRequestBuilder addDocValueField(String name) {
+ sourceBuilder().docValueField(name);
+ return this;
+ }
+
+ /**
+ * Adds a stored field to load and return (note, it must be stored) as part of the search request.
+ * If none are specified, the source of the document will be returned.
+ */
+ public NoopSearchRequestBuilder addStoredField(String field) {
+ sourceBuilder().storedField(field);
+ return this;
+ }
+
+
+ /**
+ * Adds a script based field to load and return. The field does not have to be stored,
+ * but it's recommended to use non-analyzed or numeric fields.
+ *
+ * @param name The name that will represent this value in the return hit
+ * @param script The script to use
+ */
+ public NoopSearchRequestBuilder addScriptField(String name, Script script) {
+ sourceBuilder().scriptField(name, script);
+ return this;
+ }
+
+ /**
+ * Adds a sort against the given field name and the sort ordering.
+ *
+ * @param field The name of the field
+ * @param order The sort ordering
+ */
+ public NoopSearchRequestBuilder addSort(String field, SortOrder order) {
+ sourceBuilder().sort(field, order);
+ return this;
+ }
+
+ /**
+ * Adds a generic sort builder.
+ *
+ * @see org.elasticsearch.search.sort.SortBuilders
+ */
+ public NoopSearchRequestBuilder addSort(SortBuilder sort) {
+ sourceBuilder().sort(sort);
+ return this;
+ }
+
+ /**
+ * Set the sort values that indicate which docs this request should "search after".
+ */
+ public NoopSearchRequestBuilder searchAfter(Object[] values) {
+ sourceBuilder().searchAfter(values);
+ return this;
+ }
+
+ public NoopSearchRequestBuilder slice(SliceBuilder builder) {
+ sourceBuilder().slice(builder);
+ return this;
+ }
+
+ /**
+ * Applies when sorting, and controls if scores will be tracked as well. Defaults to
+ * <tt>false</tt>.
+ */
+ public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
+ sourceBuilder().trackScores(trackScores);
+ return this;
+ }
+
+
+ /**
+ * Sets the fields to load and return as part of the search request. If none
+ * are specified, the source of the document will be returned.
+ */
+ public NoopSearchRequestBuilder storedFields(String... fields) {
+ sourceBuilder().storedFields(Arrays.asList(fields));
+ return this;
+ }
+
+ /**
+ * Adds an aggregation to the search operation.
+ */
+ public NoopSearchRequestBuilder addAggregation(AggregationBuilder aggregation) {
+ sourceBuilder().aggregation(aggregation);
+ return this;
+ }
+
+ /**
+ * Adds an aggregation to the search operation.
+ */
+ public NoopSearchRequestBuilder addAggregation(PipelineAggregationBuilder aggregation) {
+ sourceBuilder().aggregation(aggregation);
+ return this;
+ }
+
+ public NoopSearchRequestBuilder highlighter(HighlightBuilder highlightBuilder) {
+ sourceBuilder().highlighter(highlightBuilder);
+ return this;
+ }
+
+ /**
+ * Delegates to {@link org.elasticsearch.search.builder.SearchSourceBuilder#suggest(SuggestBuilder)}
+ */
+ public NoopSearchRequestBuilder suggest(SuggestBuilder suggestBuilder) {
+ sourceBuilder().suggest(suggestBuilder);
+ return this;
+ }
+
+ /**
+ * Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
+ * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
+ *
+ * @param rescorer rescorer configuration
+ * @return this for chaining
+ */
+ public NoopSearchRequestBuilder setRescorer(RescoreBuilder<?> rescorer) {
+ sourceBuilder().clearRescorers();
+ return addRescorer(rescorer);
+ }
+
+ /**
+ * Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
+ * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
+ *
+ * @param rescorer rescorer configuration
+ * @param window rescore window
+ * @return this for chaining
+ */
+ public NoopSearchRequestBuilder setRescorer(RescoreBuilder rescorer, int window) {
+ sourceBuilder().clearRescorers();
+ return addRescorer(rescorer.windowSize(window));
+ }
+
+ /**
+ * Adds a new rescorer.
+ *
+ * @param rescorer rescorer configuration
+ * @return this for chaining
+ */
+ public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer) {
+ sourceBuilder().addRescorer(rescorer);
+ return this;
+ }
+
+ /**
+ * Adds a new rescorer.
+ *
+ * @param rescorer rescorer configuration
+ * @param window rescore window
+ * @return this for chaining
+ */
+ public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer, int window) {
+ sourceBuilder().addRescorer(rescorer.windowSize(window));
+ return this;
+ }
+
+ /**
+ * Clears all rescorers from the builder.
+ *
+ * @return this for chaining
+ */
+ public NoopSearchRequestBuilder clearRescorers() {
+ sourceBuilder().clearRescorers();
+ return this;
+ }
+
+ /**
+ * Sets the source of the request as a SearchSourceBuilder.
+ */
+ public NoopSearchRequestBuilder setSource(SearchSourceBuilder source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets whether this request should use the request cache, assuming that it can (for
+ * example, if "now" is used it will never be cached). When not set (or null), it falls
+ * back to the index-level setting that controls whether the request cache is enabled.
+ */
+ public NoopSearchRequestBuilder setRequestCache(Boolean requestCache) {
+ request.requestCache(requestCache);
+ return this;
+ }
+
+ /**
+ * Should the query be profiled. Defaults to <code>false</code>
+ */
+ public NoopSearchRequestBuilder setProfile(boolean profile) {
+ sourceBuilder().profile(profile);
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ if (request.source() != null) {
+ return request.source().toString();
+ }
+ return new SearchSourceBuilder().toString();
+ }
+
+ private SearchSourceBuilder sourceBuilder() {
+ if (request.source() == null) {
+ request.source(new SearchSourceBuilder());
+ }
+ return request.source();
+ }
+}
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java
new file mode 100644
index 0000000000..3520876af0
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop.action.search;
+
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestStatusToXContentListener;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+
+public class RestNoopSearchAction extends BaseRestHandler {
+
+ @Inject
+ public RestNoopSearchAction(Settings settings, RestController controller) {
+ super(settings);
+ controller.registerHandler(GET, "/_noop_search", this);
+ controller.registerHandler(POST, "/_noop_search", this);
+ controller.registerHandler(GET, "/{index}/_noop_search", this);
+ controller.registerHandler(POST, "/{index}/_noop_search", this);
+ controller.registerHandler(GET, "/{index}/{type}/_noop_search", this);
+ controller.registerHandler(POST, "/{index}/{type}/_noop_search", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException {
+ SearchRequest searchRequest = new SearchRequest();
+ client.execute(NoopSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel));
+ }
+}
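
As a hedged usage sketch (not part of the diff), the endpoints registered above can be exercised with the low-level REST client; the host and port are assumptions:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class NoopSearchExample {
        public static void main(String[] args) throws Exception {
            try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
                // GET and POST are both registered, optionally scoped by {index} and {type}
                Response response = restClient.performRequest("GET", "/_noop_search");
                System.out.println(response.getStatusLine());
            }
        }
    }
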
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java
new file mode 100644
index 0000000000..c4397684bc
--- /dev/null
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.noop.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.InternalSearchHits;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.SearchProfileShardResults;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Collections;
+
+public class TransportNoopSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
+ @Inject
+ public TransportNoopSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters
+ actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+ super(settings, NoopSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
+ SearchRequest::new);
+ }
+
+ @Override
+ protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
+ listener.onResponse(new SearchResponse(new InternalSearchResponse(
+ new InternalSearchHits(
+ new InternalSearchHit[0], 0L, 0.0f),
+ new InternalAggregations(Collections.emptyList()),
+ new Suggest(Collections.emptyList()),
+ new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0]));
+ }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java
index ef3b702f2c..02aedb4765 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/Response.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java
@@ -32,7 +32,7 @@ import java.util.Objects;
* Holds an elasticsearch response. It wraps the {@link HttpResponse} returned and associates it with
* its corresponding {@link RequestLine} and {@link HttpHost}.
*/
-public final class Response {
+public class Response {
private final RequestLine requestLine;
private final HttpHost host;
diff --git a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java
index af897ba93d..2f22bfd50f 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java
@@ -33,7 +33,7 @@ public final class ResponseException extends IOException {
private Response response;
- ResponseException(Response response) throws IOException {
+ public ResponseException(Response response) throws IOException {
super(buildMessage(response));
this.response = response;
}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
index 84863580bb..d2301e1e8e 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
@@ -65,19 +65,23 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
/**
- * Client that connects to an elasticsearch cluster through http.
+ * Client that connects to an Elasticsearch cluster through HTTP.
+ * <p>
* Must be created using {@link RestClientBuilder}, which allows setting all the different options or just relying on defaults.
* The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
* by calling {@link #setHosts(HttpHost...)}.
+ * <p>
* The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows sending a request to the cluster. When
* sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
* retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
* failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that
* deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
- *
+ * <p>
+ * Requests can be either synchronous or asynchronous. The asynchronous variants all end with {@code Async}.
+ * <p>
* Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format.
*/
-public final class RestClient implements Closeable {
+public class RestClient implements Closeable {
private static final Log logger = LogFactory.getLog(RestClient.class);
@@ -85,17 +89,19 @@ public final class RestClient implements Closeable {
//we don't rely on default headers supported by HttpAsyncClient as those cannot be replaced
private final Header[] defaultHeaders;
private final long maxRetryTimeoutMillis;
+ private final String pathPrefix;
private final AtomicInteger lastHostIndex = new AtomicInteger(0);
private volatile Set<HttpHost> hosts;
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final FailureListener failureListener;
RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
- HttpHost[] hosts, FailureListener failureListener) {
+ HttpHost[] hosts, String pathPrefix, FailureListener failureListener) {
this.client = client;
this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
this.defaultHeaders = defaultHeaders;
this.failureListener = failureListener;
+ this.pathPrefix = pathPrefix;
setHosts(hosts);
}
@@ -124,41 +130,41 @@ public final class RestClient implements Closeable {
}
/**
- * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
+ * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
* to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
* and request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param headers the optional request headers
- * @return the response returned by elasticsearch
+ * @return the response returned by Elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
- * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+ * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
return performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
}
/**
- * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
+ * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
* to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param headers the optional request headers
- * @return the response returned by elasticsearch
+ * @return the response returned by Elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
- * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+ * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
return performRequest(method, endpoint, params, (HttpEntity)null, headers);
}
/**
- * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
+ * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
* to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, Header...)}
* which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
* will be used to consume the response body.
@@ -168,10 +174,10 @@ public final class RestClient implements Closeable {
* @param params the query_string parameters
* @param entity the body of the request, null if not applicable
* @param headers the optional request headers
- * @return the response returned by elasticsearch
+ * @return the response returned by Elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
- * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+ * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, Header... headers) throws IOException {
@@ -180,7 +186,7 @@ public final class RestClient implements Closeable {
}
/**
- * Sends a request to the elasticsearch cluster that the client points to. Blocks until the request is completed and returns
+ * Sends a request to the Elasticsearch cluster that the client points to. Blocks until the request is completed and returns
* its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts
* are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times
* they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead
@@ -193,37 +199,37 @@ public final class RestClient implements Closeable {
* @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response
* body gets streamed from a non-blocking HTTP connection on the client side.
* @param headers the optional request headers
- * @return the response returned by elasticsearch
+ * @return the response returned by Elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
- * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+ * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
Header... headers) throws IOException {
SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
- performRequest(method, endpoint, params, entity, responseConsumer, listener, headers);
+ performRequestAsync(method, endpoint, params, entity, responseConsumer, listener, headers);
return listener.get();
}
/**
- * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
+ * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
- * {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
+ * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
- public void performRequest(String method, String endpoint, ResponseListener responseListener, Header... headers) {
- performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
+ public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) {
+ performRequestAsync(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
}
/**
- * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
+ * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
- * {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
+ * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
@@ -231,15 +237,15 @@ public final class RestClient implements Closeable {
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
- public void performRequest(String method, String endpoint, Map<String, String> params,
- ResponseListener responseListener, Header... headers) {
- performRequest(method, endpoint, params, null, responseListener, headers);
+ public void performRequestAsync(String method, String endpoint, Map<String, String> params,
+ ResponseListener responseListener, Header... headers) {
+ performRequestAsync(method, endpoint, params, null, responseListener, headers);
}
/**
- * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
+ * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure.
- * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
+ * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
* which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
* will be used to consume the response body.
*
@@ -250,14 +256,14 @@ public final class RestClient implements Closeable {
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
- public void performRequest(String method, String endpoint, Map<String, String> params,
- HttpEntity entity, ResponseListener responseListener, Header... headers) {
+ public void performRequestAsync(String method, String endpoint, Map<String, String> params,
+ HttpEntity entity, ResponseListener responseListener, Header... headers) {
HttpAsyncResponseConsumer<HttpResponse> responseConsumer = new HeapBufferedAsyncResponseConsumer();
- performRequest(method, endpoint, params, entity, responseConsumer, responseListener, headers);
+ performRequestAsync(method, endpoint, params, entity, responseConsumer, responseListener, headers);
}
/**
- * Sends a request to the elasticsearch cluster that the client points to. The request is executed asynchronously
+ * Sends a request to the Elasticsearch cluster that the client points to. The request is executed asynchronously
* and the provided {@link ResponseListener} gets notified upon request completion or failure.
* Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
* amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
@@ -273,20 +279,20 @@ public final class RestClient implements Closeable {
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
- public void performRequest(String method, String endpoint, Map<String, String> params,
- HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
- ResponseListener responseListener, Header... headers) {
- URI uri = buildUri(endpoint, params);
+ public void performRequestAsync(String method, String endpoint, Map<String, String> params,
+ HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
+ ResponseListener responseListener, Header... headers) {
+ URI uri = buildUri(pathPrefix, endpoint, params);
HttpRequestBase request = createHttpRequest(method, uri, entity);
setHeaders(request, headers);
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
long startTime = System.nanoTime();
- performRequest(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
+ performRequestAsync(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
}
- private void performRequest(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
- final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
- final FailureTrackingResponseListener listener) {
+ private void performRequestAsync(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
+ final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
+ final FailureTrackingResponseListener listener) {
final HttpHost host = hosts.next();
//we stream the request body if the entity allows for it
HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request);
@@ -340,7 +346,7 @@ public final class RestClient implements Closeable {
} else {
listener.trackFailure(exception);
request.reset();
- performRequest(startTime, hosts, request, responseConsumer, listener);
+ performRequestAsync(startTime, hosts, request, responseConsumer, listener);
}
} else {
listener.onDefinitiveFailure(exception);
@@ -356,12 +362,17 @@ public final class RestClient implements Closeable {
private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
Objects.requireNonNull(requestHeaders, "request headers must not be null");
- for (Header defaultHeader : defaultHeaders) {
- httpRequest.setHeader(defaultHeader);
- }
+ // request headers override default headers, so we don't add default headers if they exist as request headers
+ final Set<String> requestNames = new HashSet<>(requestHeaders.length);
for (Header requestHeader : requestHeaders) {
Objects.requireNonNull(requestHeader, "request header must not be null");
- httpRequest.setHeader(requestHeader);
+ httpRequest.addHeader(requestHeader);
+ requestNames.add(requestHeader.getName());
+ }
+ for (Header defaultHeader : defaultHeaders) {
+ if (requestNames.contains(defaultHeader.getName()) == false) {
+ httpRequest.addHeader(defaultHeader);
+ }
}
}
@@ -497,10 +508,21 @@ public final class RestClient implements Closeable {
return httpRequest;
}
- private static URI buildUri(String path, Map<String, String> params) {
+ private static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
Objects.requireNonNull(params, "params must not be null");
try {
- URIBuilder uriBuilder = new URIBuilder(path);
+ String fullPath;
+ if (pathPrefix != null) {
+ if (path.startsWith("/")) {
+ fullPath = pathPrefix + path;
+ } else {
+ fullPath = pathPrefix + "/" + path;
+ }
+ } else {
+ fullPath = path;
+ }
+
+ URIBuilder uriBuilder = new URIBuilder(fullPath);
for (Map.Entry<String, String> param : params.entrySet()) {
uriBuilder.addParameter(param.getKey(), param.getValue());
}
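
A short sketch of the renamed asynchronous API and the new header precedence, assuming a local node; the X-App header name is illustrative:

    import org.apache.http.Header;
    import org.apache.http.HttpHost;
    import org.apache.http.message.BasicHeader;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.ResponseListener;
    import org.elasticsearch.client.RestClient;

    public class AsyncExample {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
                    .setDefaultHeaders(new Header[]{new BasicHeader("X-App", "bench")})
                    .build()) {
                // blocking variant keeps its name
                Response response = client.performRequest("GET", "/");
                // non-blocking variants now end with Async
                client.performRequestAsync("GET", "/", new ResponseListener() {
                    @Override
                    public void onSuccess(Response response) { /* consume response */ }

                    @Override
                    public void onFailure(Exception exception) { /* handle failure */ }
                });
                // a request-time "X-App" header would now replace the default instead of being sent twice;
                // a real caller would wait for the listener before closing the client
            }
        }
    }
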
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
index 4d5b72eba4..d342d59ade 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
@@ -51,12 +51,17 @@ public final class RestClientBuilder {
private RestClient.FailureListener failureListener;
private HttpClientConfigCallback httpClientConfigCallback;
private RequestConfigCallback requestConfigCallback;
+ private String pathPrefix;
/**
* Creates a new builder instance and sets the hosts that the client will send requests to.
+ *
+ * @throws NullPointerException if {@code hosts} or any host is {@code null}.
+ * @throws IllegalArgumentException if {@code hosts} is empty.
*/
RestClientBuilder(HttpHost... hosts) {
- if (hosts == null || hosts.length == 0) {
+ Objects.requireNonNull(hosts, "hosts must not be null");
+ if (hosts.length == 0) {
throw new IllegalArgumentException("no hosts provided");
}
for (HttpHost host : hosts) {
@@ -66,7 +71,11 @@ public final class RestClientBuilder {
}
/**
- * Sets the default request headers, which will be sent along with each request
+ * Sets the default request headers, which will be sent along with each request.
+ * <p>
+ * Request-time headers will always overwrite any default headers.
+ *
+ * @throws NullPointerException if {@code defaultHeaders} or any header is {@code null}.
*/
public RestClientBuilder setDefaultHeaders(Header[] defaultHeaders) {
Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
@@ -79,6 +88,8 @@ public final class RestClientBuilder {
/**
* Sets the {@link RestClient.FailureListener} to be notified for each request failure
+ *
+ * @throws NullPointerException if {@code failureListener} is {@code null}.
*/
public RestClientBuilder setFailureListener(RestClient.FailureListener failureListener) {
Objects.requireNonNull(failureListener, "failureListener must not be null");
@@ -90,7 +101,7 @@ public final class RestClientBuilder {
* Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
* {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
*
- * @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
+ * @throws IllegalArgumentException if {@code maxRetryTimeoutMillis} is not greater than 0
*/
public RestClientBuilder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
if (maxRetryTimeoutMillis <= 0) {
@@ -102,6 +113,8 @@ public final class RestClientBuilder {
/**
* Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
+ *
+ * @throws NullPointerException if {@code httpClientConfigCallback} is {@code null}.
*/
public RestClientBuilder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
@@ -111,6 +124,8 @@ public final class RestClientBuilder {
/**
* Sets the {@link RequestConfigCallback} to be used to customize http client configuration
+ *
+ * @throws NullPointerException if {@code requestConfigCallback} is {@code null}.
*/
public RestClientBuilder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
@@ -119,6 +134,43 @@ public final class RestClientBuilder {
}
/**
+ * Sets the path prefix for every request sent by the http client.
+ * <p>
+ * For example, if this is set to "/my/path", then any client request will become <code>"/my/path/" + endpoint</code>.
+ * <p>
+ * In essence, every request's {@code endpoint} is prefixed by this {@code pathPrefix}. The path prefix is useful when
+ * Elasticsearch is behind a proxy that provides a base path; it is not intended for other purposes and should not be
+ * supplied in other scenarios.
+ *
+ * @throws NullPointerException if {@code pathPrefix} is {@code null}.
+ * @throws IllegalArgumentException if {@code pathPrefix} is empty, only '/', or ends with more than one '/'.
+ */
+ public RestClientBuilder setPathPrefix(String pathPrefix) {
+ Objects.requireNonNull(pathPrefix, "pathPrefix must not be null");
+ String cleanPathPrefix = pathPrefix;
+
+ if (cleanPathPrefix.startsWith("/") == false) {
+ cleanPathPrefix = "/" + cleanPathPrefix;
+ }
+
+ // best effort to ensure that it looks like "/base/path" rather than "/base/path/"
+ if (cleanPathPrefix.endsWith("/")) {
+ cleanPathPrefix = cleanPathPrefix.substring(0, cleanPathPrefix.length() - 1);
+
+ if (cleanPathPrefix.endsWith("/")) {
+ throw new IllegalArgumentException("pathPrefix is malformed. too many trailing slashes: [" + pathPrefix + "]");
+ }
+ }
+
+ if (cleanPathPrefix.isEmpty() || "/".equals(cleanPathPrefix)) {
+ throw new IllegalArgumentException("pathPrefix must not be empty or '/': [" + pathPrefix + "]");
+ }
+
+ this.pathPrefix = cleanPathPrefix;
+ return this;
+ }
+
+ /**
* Creates a new {@link RestClient} based on the provided configuration.
*/
public RestClient build() {
@@ -126,7 +178,7 @@ public final class RestClientBuilder {
failureListener = new RestClient.FailureListener();
}
CloseableHttpAsyncClient httpClient = createHttpClient();
- RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
+ RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener);
httpClient.start();
return restClient;
}
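
A minimal sketch of setPathPrefix in use; the proxy host and the "/es" base path are assumptions (the relevant org.apache.http and org.elasticsearch.client imports are assumed):

    // With the prefix set, performRequest("GET", "/_cluster/health") is sent to /es/_cluster/health.
    RestClient client = RestClient.builder(new HttpHost("proxy.example.com", 80))
            .setPathPrefix("/es")
            .build();
    Response response = client.performRequest("GET", "/_cluster/health");
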
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java
index ca67186212..c9243d3aaf 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java
@@ -19,7 +19,6 @@
package org.elasticsearch.client;
-import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
@@ -28,8 +27,10 @@ import org.apache.http.message.BasicHeader;
import java.io.IOException;
+import static org.hamcrest.Matchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
public class RestClientBuilderTests extends RestClientTestCase {
@@ -38,8 +39,8 @@ public class RestClientBuilderTests extends RestClientTestCase {
try {
RestClient.builder((HttpHost[])null);
fail("should have failed");
- } catch(IllegalArgumentException e) {
- assertEquals("no hosts provided", e.getMessage());
+ } catch(NullPointerException e) {
+ assertEquals("hosts must not be null", e.getMessage());
}
try {
@@ -62,7 +63,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
try {
RestClient.builder(new HttpHost("localhost", 9200))
- .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+ .setMaxRetryTimeoutMillis(randomIntBetween(Integer.MIN_VALUE, 0));
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("maxRetryTimeoutMillis must be greater than 0", e.getMessage());
@@ -103,13 +104,13 @@ public class RestClientBuilderTests extends RestClientTestCase {
assertEquals("requestConfigCallback must not be null", e.getMessage());
}
- int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ int numNodes = randomIntBetween(1, 5);
HttpHost[] hosts = new HttpHost[numNodes];
for (int i = 0; i < numNodes; i++) {
hosts[i] = new HttpHost("localhost", 9200 + i);
}
RestClientBuilder builder = RestClient.builder(hosts);
- if (getRandom().nextBoolean()) {
+ if (randomBoolean()) {
builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
@Override
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
@@ -117,7 +118,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
}
});
}
- if (getRandom().nextBoolean()) {
+ if (randomBoolean()) {
builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
@Override
public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
@@ -125,19 +126,55 @@ public class RestClientBuilderTests extends RestClientTestCase {
}
});
}
- if (getRandom().nextBoolean()) {
- int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ if (randomBoolean()) {
+ int numHeaders = randomIntBetween(1, 5);
Header[] headers = new Header[numHeaders];
for (int i = 0; i < numHeaders; i++) {
headers[i] = new BasicHeader("header" + i, "value");
}
builder.setDefaultHeaders(headers);
}
- if (getRandom().nextBoolean()) {
- builder.setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+ if (randomBoolean()) {
+ builder.setMaxRetryTimeoutMillis(randomIntBetween(1, Integer.MAX_VALUE));
+ }
+ if (randomBoolean()) {
+ String pathPrefix = (randomBoolean() ? "/" : "") + randomAsciiOfLengthBetween(2, 5);
+ while (pathPrefix.length() < 20 && randomBoolean()) {
+ pathPrefix += "/" + randomAsciiOfLengthBetween(3, 6);
+ }
+ builder.setPathPrefix(pathPrefix + (randomBoolean() ? "/" : ""));
}
try (RestClient restClient = builder.build()) {
assertNotNull(restClient);
}
}
+
+ public void testSetPathPrefixNull() {
+ try {
+ RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(null);
+ fail("pathPrefix set to null should fail!");
+ } catch (final NullPointerException e) {
+ assertEquals("pathPrefix must not be null", e.getMessage());
+ }
+ }
+
+ public void testSetPathPrefixEmpty() {
+ assertSetPathPrefixThrows("/");
+ assertSetPathPrefixThrows("");
+ }
+
+ public void testSetPathPrefixMalformed() {
+ assertSetPathPrefixThrows("//");
+ assertSetPathPrefixThrows("base/path//");
+ }
+
+ private static void assertSetPathPrefixThrows(final String pathPrefix) {
+ try {
+ RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(pathPrefix);
+ fail("path prefix [" + pathPrefix + "] should have failed");
+ } catch (final IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString(pathPrefix));
+ }
+ }
+
}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java
index 455affea9d..9c5c50946d 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java
@@ -19,18 +19,15 @@
package org.elasticsearch.client;
-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import com.sun.net.httpserver.Headers;
+import com.sun.net.httpserver.HttpContext;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.Consts;
import org.apache.http.Header;
-import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.entity.StringEntity;
-import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.junit.AfterClass;
@@ -60,6 +57,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
/**
* Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
@@ -81,13 +79,8 @@ public class RestClientIntegTests extends RestClientTestCase {
for (int statusCode : getAllStatusCodes()) {
createStatusCodeContext(httpServer, statusCode);
}
- int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
- defaultHeaders = new Header[numHeaders];
- for (int i = 0; i < numHeaders; i++) {
- String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
- String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
- defaultHeaders[i] = new BasicHeader(headerName, headerValue);
- }
+ int numHeaders = randomIntBetween(0, 5);
+ defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
.setDefaultHeaders(defaultHeaders).build();
}
@@ -146,44 +139,43 @@ public class RestClientIntegTests extends RestClientTestCase {
*/
public void testHeaders() throws IOException {
for (String method : getHttpMethods()) {
- Set<String> standardHeaders = new HashSet<>(
- Arrays.asList("Connection", "Host", "User-agent", "Date"));
+ final Set<String> standardHeaders = new HashSet<>(Arrays.asList("Connection", "Host", "User-agent", "Date"));
if (method.equals("HEAD") == false) {
standardHeaders.add("Content-length");
}
- int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
- Map<String, String> expectedHeaders = new HashMap<>();
- for (Header defaultHeader : defaultHeaders) {
- expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
- }
- Header[] headers = new Header[numHeaders];
- for (int i = 0; i < numHeaders; i++) {
- String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
- String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
- headers[i] = new BasicHeader(headerName, headerValue);
- expectedHeaders.put(headerName, headerValue);
- }
- int statusCode = randomStatusCode(getRandom());
+ final int numHeaders = randomIntBetween(1, 5);
+ final Header[] headers = generateHeaders("Header", "Header-array", numHeaders);
+ final Map<String, List<String>> expectedHeaders = new HashMap<>();
+
+ addHeaders(expectedHeaders, defaultHeaders, headers);
+
+ final int statusCode = randomStatusCode(getRandom());
Response esResponse;
try {
- esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(),
- (HttpEntity)null, headers);
+ esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), headers);
} catch(ResponseException e) {
esResponse = e.getResponse();
}
assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
- for (Header responseHeader : esResponse.getHeaders()) {
- if (responseHeader.getName().startsWith("Header")) {
- String headerValue = expectedHeaders.remove(responseHeader.getName());
- assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
+ for (final Header responseHeader : esResponse.getHeaders()) {
+ final String name = responseHeader.getName();
+ final String value = responseHeader.getValue();
+ if (name.startsWith("Header")) {
+ final List<String> values = expectedHeaders.get(name);
+ assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
+ assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));
+
+ // we've collected them all
+ if (values.isEmpty()) {
+ expectedHeaders.remove(name);
+ }
} else {
- assertTrue("unknown header was returned " + responseHeader.getName(),
- standardHeaders.remove(responseHeader.getName()));
+ assertTrue("unknown header was returned " + name, standardHeaders.remove(name));
}
}
- assertEquals("some headers that were sent weren't returned: " + expectedHeaders, 0, expectedHeaders.size());
- assertEquals("some expected standard headers weren't returned: " + standardHeaders, 0, standardHeaders.size());
+ assertTrue("some headers that were sent weren't returned: " + expectedHeaders, expectedHeaders.isEmpty());
+ assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty());
}
}
@@ -205,6 +197,38 @@ public class RestClientIntegTests extends RestClientTestCase {
bodyTest("GET");
}
+ /**
+ * Ensure that pathPrefix works as expected.
+ */
+ public void testPathPrefix() throws IOException {
+ // guarantee no other test setup collides with this one and lets it sneak through
+ final String uniqueContextSuffix = "/testPathPrefix";
+ final String pathPrefix = "base/" + randomAsciiOfLengthBetween(1, 5) + "/";
+ final int statusCode = randomStatusCode(getRandom());
+
+ final HttpContext context =
+ httpServer.createContext("/" + pathPrefix + statusCode + uniqueContextSuffix, new ResponseHandler(statusCode));
+
+ try (final RestClient client =
+ RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
+ .setPathPrefix((randomBoolean() ? "/" : "") + pathPrefix).build()) {
+
+ for (final String method : getHttpMethods()) {
+ Response esResponse;
+ try {
+ esResponse = client.performRequest(method, "/" + statusCode + uniqueContextSuffix);
+ } catch(ResponseException e) {
+ esResponse = e.getResponse();
+ }
+
+ assertThat(esResponse.getRequestLine().getUri(), equalTo("/" + pathPrefix + statusCode + uniqueContextSuffix));
+ assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
+ }
+ } finally {
+ httpServer.removeContext(context);
+ }
+ }
+
private void bodyTest(String method) throws IOException {
String requestBody = "{ \"field\": \"value\" }";
StringEntity entity = new StringEntity(requestBody);
@@ -226,7 +250,7 @@ public class RestClientIntegTests extends RestClientTestCase {
for (int i = 0; i < numRequests; i++) {
final String method = RestClientTestUtil.randomHttpMethod(getRandom());
final int statusCode = randomStatusCode(getRandom());
- restClient.performRequest(method, "/" + statusCode, new ResponseListener() {
+ restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() {
@Override
public void onSuccess(Response response) {
responses.add(new TestResponse(method, statusCode, response));
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java
index 2a2d279689..049a216936 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java
@@ -101,7 +101,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
httpHosts[i] = new HttpHost("localhost", 9200 + i);
}
failureListener = new HostsTrackingFailureListener();
- restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
+ restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener);
}
public void testRoundRobinOkStatusCodes() throws IOException {
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
index e347dfecc1..92e2b0da97 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
@@ -19,8 +19,6 @@
package org.elasticsearch.client;
-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
@@ -41,7 +39,6 @@ import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
-import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
@@ -58,7 +55,10 @@ import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.Future;
import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
@@ -132,16 +132,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
});
- int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
- defaultHeaders = new Header[numHeaders];
- for (int i = 0; i < numHeaders; i++) {
- String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
- String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
- defaultHeaders[i] = new BasicHeader(headerName, headerValue);
- }
+ int numHeaders = randomIntBetween(0, 3);
+ defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
httpHost = new HttpHost("localhost", 9200);
failureListener = new HostsTrackingFailureListener();
- restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
+ restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener);
}
/**
@@ -333,20 +328,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
*/
public void testHeaders() throws IOException {
for (String method : getHttpMethods()) {
- Map<String, String> expectedHeaders = new HashMap<>();
- for (Header defaultHeader : defaultHeaders) {
- expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
- }
- int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
- Header[] headers = new Header[numHeaders];
- for (int i = 0; i < numHeaders; i++) {
- String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
- String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
- headers[i] = new BasicHeader(headerName, headerValue);
- expectedHeaders.put(headerName, headerValue);
- }
+ final int numHeaders = randomIntBetween(1, 5);
+ final Header[] headers = generateHeaders("Header", null, numHeaders);
+ final Map<String, List<String>> expectedHeaders = new HashMap<>();
- int statusCode = randomStatusCode(getRandom());
+ addHeaders(expectedHeaders, defaultHeaders, headers);
+
+ final int statusCode = randomStatusCode(getRandom());
Response esResponse;
try {
esResponse = restClient.performRequest(method, "/" + statusCode, headers);
@@ -355,10 +343,18 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
for (Header responseHeader : esResponse.getHeaders()) {
- String headerValue = expectedHeaders.remove(responseHeader.getName());
- assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
+ final String name = responseHeader.getName();
+ final String value = responseHeader.getValue();
+ final List<String> values = expectedHeaders.get(name);
+ assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
+ assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));
+
+ // we've collected them all
+ if (values.isEmpty()) {
+ expectedHeaders.remove(name);
+ }
}
- assertEquals("some headers that were sent weren't returned " + expectedHeaders, 0, expectedHeaders.size());
+ assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty());
}
}
@@ -368,11 +364,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
Map<String, String> params = Collections.emptyMap();
boolean hasParams = randomBoolean();
if (hasParams) {
- int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3);
+ int numParams = randomIntBetween(1, 3);
params = new HashMap<>(numParams);
for (int i = 0; i < numParams; i++) {
String paramKey = "param-" + i;
- String paramValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+ String paramValue = randomAsciiOfLengthBetween(3, 10);
params.put(paramKey, paramValue);
uriBuilder.addParameter(paramKey, paramValue);
}
@@ -412,24 +408,24 @@ public class RestClientSingleHostTests extends RestClientTestCase {
HttpEntity entity = null;
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
if (hasBody) {
- entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100));
+ entity = new StringEntity(randomAsciiOfLengthBetween(10, 100));
((HttpEntityEnclosingRequest) request).setEntity(entity);
}
Header[] headers = new Header[0];
- for (Header defaultHeader : defaultHeaders) {
- //default headers are expected but not sent for each request
- request.setHeader(defaultHeader);
+ final int numHeaders = randomIntBetween(1, 5);
+ final Set<String> uniqueNames = new HashSet<>(numHeaders);
+ if (randomBoolean()) {
+ headers = generateHeaders("Header", "Header-array", numHeaders);
+ for (Header header : headers) {
+ request.addHeader(header);
+ uniqueNames.add(header.getName());
+ }
}
- if (getRandom().nextBoolean()) {
- int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
- headers = new Header[numHeaders];
- for (int i = 0; i < numHeaders; i++) {
- String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
- String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
- BasicHeader basicHeader = new BasicHeader(headerName, headerValue);
- headers[i] = basicHeader;
- request.setHeader(basicHeader);
+ for (Header defaultHeader : defaultHeaders) {
+ // request level headers override default headers
+ if (uniqueNames.contains(defaultHeader.getName()) == false) {
+ request.addHeader(defaultHeader);
}
}
@@ -459,4 +455,5 @@ public class RestClientSingleHostTests extends RestClientTestCase {
throw new UnsupportedOperationException();
}
}
+
}
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
index 247206bbbc..89a7d9df8e 100644
--- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
@@ -42,7 +42,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
* {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the
* previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
*/
-public final class Sniffer implements Closeable {
+public class Sniffer implements Closeable {
private static final Log logger = LogFactory.getLog(Sniffer.class);
diff --git a/client/test/build.gradle b/client/test/build.gradle
index 05d044504e..e57d415e9e 100644
--- a/client/test/build.gradle
+++ b/client/test/build.gradle
@@ -26,10 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
-install.enabled = false
-uploadArchives.enabled = false
-
dependencies {
+ compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
compile "junit:junit:${versions.junit}"
compile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@@ -60,4 +58,4 @@ namingConventions.enabled = false
//we aren't releasing this jar
thirdPartyAudit.enabled = false
-test.enabled = false
\ No newline at end of file
+test.enabled = false
diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java
index 8c506beb5a..4296932a00 100644
--- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java
+++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java
@@ -31,6 +31,15 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.http.Header;
+import org.apache.http.message.BasicHeader;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
@TestMethodProviders({
JUnit3MethodProvider.class
})
@@ -43,4 +52,71 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
@TimeoutSuite(millis = 2 * 60 * 60 * 1000)
public abstract class RestClientTestCase extends RandomizedTest {
+ /**
+ * Create the specified number of {@link Header}s.
+ * <p>
+ * Generated header names will be the {@code baseName} plus its index or, rarely, the {@code arrayName} if it's supplied.
+ *
+ * @param baseName The base name to use for all headers.
+ * @param arrayName The optional ({@code null}able) array name to use randomly.
+ * @param headers The number of headers to create.
+ * @return Never {@code null}.
+ */
+ protected static Header[] generateHeaders(final String baseName, final String arrayName, final int headers) {
+ final Header[] generated = new Header[headers];
+ for (int i = 0; i < headers; i++) {
+ String headerName = baseName + i;
+ if (arrayName != null && rarely()) {
+ headerName = arrayName;
+ }
+
+ generated[i] = new BasicHeader(headerName, randomAsciiOfLengthBetween(3, 10));
+ }
+ return generated;
+ }
+
+ /**
+ * Create a new {@link List} within the {@code map} if none exists for {@code name} or append to the existing list.
+ *
+ * @param map The map to manipulate.
+ * @param name The name to create/append the list for.
+ * @param value The value to add.
+ */
+ private static void createOrAppendList(final Map<String, List<String>> map, final String name, final String value) {
+ List<String> values = map.get(name);
+
+ if (values == null) {
+ values = new ArrayList<>();
+ map.put(name, values);
+ }
+
+ values.add(value);
+ }
+
+ /**
+ * Add the {@code headers} to the {@code map} so that related tests can more easily assert that they exist.
+ * <p>
+ * If both the {@code defaultHeaders} and {@code headers} contain the same {@link Header}, based on its
+ * {@linkplain Header#getName() name}, then this will only use the {@code Header}(s) from {@code headers}.
+ *
+ * @param map The map to build with name/value(s) pairs.
+ * @param defaultHeaders The headers to add to the map representing default headers.
+ * @param headers The headers to add to the map representing request-level headers.
+ * @see #createOrAppendList(Map, String, String)
+ */
+ protected static void addHeaders(final Map<String, List<String>> map, final Header[] defaultHeaders, final Header[] headers) {
+ final Set<String> uniqueHeaders = new HashSet<>();
+ for (final Header header : headers) {
+ final String name = header.getName();
+ createOrAppendList(map, name, header.getValue());
+ uniqueHeaders.add(name);
+ }
+ for (final Header defaultHeader : defaultHeaders) {
+ final String name = defaultHeader.getName();
+ if (uniqueHeaders.contains(name) == false) {
+ createOrAppendList(map, name, defaultHeader.getValue());
+ }
+ }
+ }
+
}
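
A hedged sketch of how a subclassing test might combine the two new helpers (the counts are arbitrary, and the java.util imports are assumed):

    Header[] defaultHeaders = generateHeaders("Header-default", "Header-array", 3);
    Header[] requestHeaders = generateHeaders("Header", "Header-array", 2);
    Map<String, List<String>> expectedHeaders = new HashMap<>();
    addHeaders(expectedHeaders, defaultHeaders, requestHeaders);
    // expectedHeaders now maps each name to the values a response should echo back,
    // with request-level headers shadowing same-named defaults.
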
diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java
index 0b7c3380b9..a1d95b68af 100644
--- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java
+++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java
@@ -20,6 +20,7 @@
package org.elasticsearch.transport.client;
import com.carrotsearch.randomizedtesting.RandomizedTest;
+import org.apache.lucene.util.Constants;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -40,6 +41,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
@Test
public void testPluginInstalled() {
+ // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778
+ assumeFalse(Constants.JRE_IS_MINIMUM_JAVA9);
try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
Settings settings = client.settings();
assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
@@ -49,9 +52,7 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
@Test
public void testInstallPluginTwice() {
-
- for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class,
- MustachePlugin.class)) {
+ for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class)) {
try {
new PreBuiltTransportClient(Settings.EMPTY, plugin);
fail("exception expected");
diff --git a/core/build.gradle b/core/build.gradle
index 0e87c21757..ea2e3e27ef 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -22,7 +22,6 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
apply plugin: 'elasticsearch.build'
-apply plugin: 'com.bmuschko.nexus'
apply plugin: 'nebula.optional-base'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'
@@ -85,8 +84,10 @@ dependencies {
compile "com.vividsolutions:jts:${versions.jts}", optional
// logging
- compile "log4j:log4j:${versions.log4j}", optional
- compile "log4j:apache-log4j-extras:${versions.log4j}", optional
+ compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional
+ compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
+ // to bridge dependencies that are still on Log4j 1 to Log4j 2
+ compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional
compile "net.java.dev.jna:jna:${versions.jna}"
@@ -154,32 +155,94 @@ thirdPartyAudit.excludes = [
// classes are missing!
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
- 'com.fasterxml.jackson.databind.ObjectMapper',
-
- // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
- 'javax.jms.Message',
- 'javax.jms.MessageListener',
- 'javax.jms.ObjectMessage',
- 'javax.jms.TopicConnection',
- 'javax.jms.TopicConnectionFactory',
- 'javax.jms.TopicPublisher',
- 'javax.jms.TopicSession',
- 'javax.jms.TopicSubscriber',
-
- // from org.apache.log4j.net.SMTPAppender (log4j)
- 'javax.mail.Authenticator',
- 'javax.mail.Message$RecipientType',
- 'javax.mail.Message',
- 'javax.mail.Multipart',
- 'javax.mail.PasswordAuthentication',
- 'javax.mail.Session',
- 'javax.mail.Transport',
- 'javax.mail.internet.InternetAddress',
- 'javax.mail.internet.InternetHeaders',
- 'javax.mail.internet.MimeBodyPart',
- 'javax.mail.internet.MimeMessage',
- 'javax.mail.internet.MimeMultipart',
- 'javax.mail.internet.MimeUtility',
+ 'com.fasterxml.jackson.databind.ObjectMapper',
+
+ // from log4j
+ 'com.fasterxml.jackson.annotation.JsonInclude$Include',
+ 'com.fasterxml.jackson.databind.DeserializationContext',
+ 'com.fasterxml.jackson.databind.JsonMappingException',
+ 'com.fasterxml.jackson.databind.JsonNode',
+ 'com.fasterxml.jackson.databind.Module$SetupContext',
+ 'com.fasterxml.jackson.databind.ObjectReader',
+ 'com.fasterxml.jackson.databind.ObjectWriter',
+ 'com.fasterxml.jackson.databind.SerializerProvider',
+ 'com.fasterxml.jackson.databind.deser.std.StdDeserializer',
+ 'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer',
+ 'com.fasterxml.jackson.databind.module.SimpleModule',
+ 'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter',
+ 'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider',
+ 'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer',
+ 'com.fasterxml.jackson.databind.ser.std.StdSerializer',
+ 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
+ 'com.fasterxml.jackson.dataformat.xml.XmlMapper',
+ 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
+ 'com.lmax.disruptor.BlockingWaitStrategy',
+ 'com.lmax.disruptor.BusySpinWaitStrategy',
+ 'com.lmax.disruptor.EventFactory',
+ 'com.lmax.disruptor.EventTranslator',
+ 'com.lmax.disruptor.EventTranslatorTwoArg',
+ 'com.lmax.disruptor.EventTranslatorVararg',
+ 'com.lmax.disruptor.ExceptionHandler',
+ 'com.lmax.disruptor.LifecycleAware',
+ 'com.lmax.disruptor.RingBuffer',
+ 'com.lmax.disruptor.Sequence',
+ 'com.lmax.disruptor.SequenceReportingEventHandler',
+ 'com.lmax.disruptor.SleepingWaitStrategy',
+ 'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
+ 'com.lmax.disruptor.WaitStrategy',
+ 'com.lmax.disruptor.YieldingWaitStrategy',
+ 'com.lmax.disruptor.dsl.Disruptor',
+ 'com.lmax.disruptor.dsl.ProducerType',
+ 'javax.jms.Connection',
+ 'javax.jms.ConnectionFactory',
+ 'javax.jms.Destination',
+ 'javax.jms.Message',
+ 'javax.jms.MessageConsumer',
+ 'javax.jms.MessageListener',
+ 'javax.jms.MessageProducer',
+ 'javax.jms.ObjectMessage',
+ 'javax.jms.Session',
+ 'javax.mail.Authenticator',
+ 'javax.mail.Message$RecipientType',
+ 'javax.mail.PasswordAuthentication',
+ 'javax.mail.Session',
+ 'javax.mail.Transport',
+ 'javax.mail.internet.InternetAddress',
+ 'javax.mail.internet.InternetHeaders',
+ 'javax.mail.internet.MimeBodyPart',
+ 'javax.mail.internet.MimeMessage',
+ 'javax.mail.internet.MimeMultipart',
+ 'javax.mail.internet.MimeUtility',
+ 'javax.mail.util.ByteArrayDataSource',
+ 'javax.persistence.AttributeConverter',
+ 'javax.persistence.EntityManager',
+ 'javax.persistence.EntityManagerFactory',
+ 'javax.persistence.EntityTransaction',
+ 'javax.persistence.Persistence',
+ 'javax.persistence.PersistenceException',
+ 'org.apache.commons.compress.compressors.CompressorStreamFactory',
+ 'org.apache.commons.compress.utils.IOUtils',
+ 'org.apache.commons.csv.CSVFormat',
+ 'org.apache.commons.csv.QuoteMode',
+ 'org.apache.kafka.clients.producer.KafkaProducer',
+ 'org.apache.kafka.clients.producer.Producer',
+ 'org.apache.kafka.clients.producer.ProducerRecord',
+ 'org.codehaus.stax2.XMLStreamWriter2',
+ 'org.osgi.framework.AdaptPermission',
+ 'org.osgi.framework.AdminPermission',
+ 'org.osgi.framework.Bundle',
+ 'org.osgi.framework.BundleActivator',
+ 'org.osgi.framework.BundleContext',
+ 'org.osgi.framework.BundleEvent',
+ 'org.osgi.framework.BundleReference',
+ 'org.osgi.framework.FrameworkUtil',
+ 'org.osgi.framework.SynchronousBundleListener',
+ 'org.osgi.framework.wiring.BundleWire',
+ 'org.osgi.framework.wiring.BundleWiring',
+ 'org.zeromq.ZMQ$Context',
+ 'org.zeromq.ZMQ$Socket',
+ 'org.zeromq.ZMQ',
+
// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',
]
diff --git a/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java b/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java
new file mode 100644
index 0000000000..37ab0a1539
--- /dev/null
+++ b/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java
@@ -0,0 +1,665 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache license, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license for the specific language governing permissions and
+ * limitations under the license.
+ */
+
+package org.apache.logging.log4j.core.impl;
+
+import java.io.Serializable;
+import java.net.URL;
+import java.security.CodeSource;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
+
+import org.apache.logging.log4j.core.util.Loader;
+import org.apache.logging.log4j.status.StatusLogger;
+import org.apache.logging.log4j.util.ReflectionUtil;
+import org.apache.logging.log4j.util.Strings;
+
+/**
+ * Wraps a Throwable to add packaging information about each stack trace element.
+ *
+ * <p>
+ * A proxy is used to represent a throwable that may not exist in a different class loader or JVM. When an application
+ * deserializes a ThrowableProxy, the throwable may not be set, but the throwable's information is preserved in other
+ * fields of the proxy like the message and stack trace.
+ * </p>
+ *
+ * <p>
+ * TODO: Move this class to org.apache.logging.log4j.core because it is used from LogEvent.
+ * </p>
+ * <p>
+ * TODO: Deserialize: Try to rebuild Throwable if the target exception is in this class loader?
+ * </p>
+ */
+public class ThrowableProxy implements Serializable {
+
+ private static final String CAUSED_BY_LABEL = "Caused by: ";
+ private static final String SUPPRESSED_LABEL = "Suppressed: ";
+ private static final String WRAPPED_BY_LABEL = "Wrapped by: ";
+
+ /**
+ * Cached ExtendedClassInfo and ClassLoader.
+ * <p>
+ * Consider this class private.
+ * </p>
+ */
+ static class CacheEntry {
+ private final ExtendedClassInfo element;
+ private final ClassLoader loader;
+
+ public CacheEntry(final ExtendedClassInfo element, final ClassLoader loader) {
+ this.element = element;
+ this.loader = loader;
+ }
+ }
+
+ private static final ThrowableProxy[] EMPTY_THROWABLE_PROXY_ARRAY = new ThrowableProxy[0];
+
+ private static final char EOL = '\n';
+
+ private static final long serialVersionUID = -2752771578252251910L;
+
+ private final ThrowableProxy causeProxy;
+
+ private int commonElementCount;
+
+ private final ExtendedStackTraceElement[] extendedStackTrace;
+
+ private final String localizedMessage;
+
+ private final String message;
+
+ private final String name;
+
+ private final ThrowableProxy[] suppressedProxies;
+
+ private final transient Throwable throwable;
+
+ /**
+ * For JSON and XML IO via Jackson.
+ */
+ @SuppressWarnings("unused")
+ private ThrowableProxy() {
+ this.throwable = null;
+ this.name = null;
+ this.extendedStackTrace = null;
+ this.causeProxy = null;
+ this.message = null;
+ this.localizedMessage = null;
+ this.suppressedProxies = EMPTY_THROWABLE_PROXY_ARRAY;
+ }
+
+ /**
+ * Constructs the wrapper for the Throwable that includes packaging data.
+ *
+ * @param throwable
+ * The Throwable to wrap, must not be null.
+ */
+ public ThrowableProxy(final Throwable throwable) {
+ this(throwable, null);
+ }
+
+ /**
+ * Constructs the wrapper for the Throwable that includes packaging data.
+ *
+ * @param throwable
+ * The Throwable to wrap, must not be null.
+ * @param visited
+ * The set of visited suppressed exceptions.
+ */
+ private ThrowableProxy(final Throwable throwable, final Set<Throwable> visited) {
+ this.throwable = throwable;
+ this.name = throwable.getClass().getName();
+ this.message = throwable.getMessage();
+ this.localizedMessage = throwable.getLocalizedMessage();
+ final Map<String, CacheEntry> map = new HashMap<>();
+ final Stack<Class<?>> stack = ReflectionUtil.getCurrentStackTrace();
+ this.extendedStackTrace = this.toExtendedStackTrace(stack, map, null, throwable.getStackTrace());
+ final Throwable throwableCause = throwable.getCause();
+ final Set<Throwable> causeVisited = new HashSet<>(1);
+ this.causeProxy = throwableCause == null ? null : new ThrowableProxy(throwable, stack, map, throwableCause, visited, causeVisited);
+ this.suppressedProxies = this.toSuppressedProxies(throwable, visited);
+ }
+
+ /**
+ * Constructs the wrapper for a Throwable that is referenced as the cause by another Throwable.
+ *
+ * @param parent
+ * The Throwable referencing this Throwable.
+ * @param stack
+ * The Class stack.
+ * @param map
+ * The cache containing the packaging data.
+ * @param cause
+ * The Throwable to wrap.
+ * @param suppressedVisited TODO
+ * @param causeVisited TODO
+ */
+ private ThrowableProxy(final Throwable parent, final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
+ final Throwable cause, final Set<Throwable> suppressedVisited, final Set<Throwable> causeVisited) {
+ causeVisited.add(cause);
+ this.throwable = cause;
+ this.name = cause.getClass().getName();
+ this.message = this.throwable.getMessage();
+ this.localizedMessage = this.throwable.getLocalizedMessage();
+ this.extendedStackTrace = this.toExtendedStackTrace(stack, map, parent.getStackTrace(), cause.getStackTrace());
+ final Throwable causeCause = cause.getCause();
+ this.causeProxy = causeCause == null || causeVisited.contains(causeCause) ? null : new ThrowableProxy(parent,
+ stack, map, causeCause, suppressedVisited, causeVisited);
+ this.suppressedProxies = this.toSuppressedProxies(cause, suppressedVisited);
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (this.getClass() != obj.getClass()) {
+ return false;
+ }
+ final ThrowableProxy other = (ThrowableProxy) obj;
+ if (this.causeProxy == null) {
+ if (other.causeProxy != null) {
+ return false;
+ }
+ } else if (!this.causeProxy.equals(other.causeProxy)) {
+ return false;
+ }
+ if (this.commonElementCount != other.commonElementCount) {
+ return false;
+ }
+ if (this.name == null) {
+ if (other.name != null) {
+ return false;
+ }
+ } else if (!this.name.equals(other.name)) {
+ return false;
+ }
+ if (!Arrays.equals(this.extendedStackTrace, other.extendedStackTrace)) {
+ return false;
+ }
+ if (!Arrays.equals(this.suppressedProxies, other.suppressedProxies)) {
+ return false;
+ }
+ return true;
+ }
+
+ private void formatCause(final StringBuilder sb, final String prefix, final ThrowableProxy cause, final List<String> ignorePackages) {
+ formatThrowableProxy(sb, prefix, CAUSED_BY_LABEL, cause, ignorePackages);
+ }
+
+ private void formatThrowableProxy(final StringBuilder sb, final String prefix, final String causeLabel,
+ final ThrowableProxy throwableProxy, final List<String> ignorePackages) {
+ if (throwableProxy == null) {
+ return;
+ }
+ sb.append(prefix).append(causeLabel).append(throwableProxy).append(EOL);
+ this.formatElements(sb, prefix, throwableProxy.commonElementCount,
+ throwableProxy.getStackTrace(), throwableProxy.extendedStackTrace, ignorePackages);
+ this.formatSuppressed(sb, prefix + "\t", throwableProxy.suppressedProxies, ignorePackages);
+ this.formatCause(sb, prefix, throwableProxy.causeProxy, ignorePackages);
+ }
+
+ private void formatSuppressed(final StringBuilder sb, final String prefix, final ThrowableProxy[] suppressedProxies,
+ final List<String> ignorePackages) {
+ if (suppressedProxies == null) {
+ return;
+ }
+ for (final ThrowableProxy suppressedProxy : suppressedProxies) {
+ formatThrowableProxy(sb, prefix, SUPPRESSED_LABEL, suppressedProxy, ignorePackages);
+ }
+ }
+
+ private void formatElements(final StringBuilder sb, final String prefix, final int commonCount,
+ final StackTraceElement[] causedTrace, final ExtendedStackTraceElement[] extStackTrace,
+ final List<String> ignorePackages) {
+ if (ignorePackages == null || ignorePackages.isEmpty()) {
+ for (final ExtendedStackTraceElement element : extStackTrace) {
+ this.formatEntry(element, sb, prefix);
+ }
+ } else {
+ int count = 0;
+ for (int i = 0; i < extStackTrace.length; ++i) {
+ if (!this.ignoreElement(causedTrace[i], ignorePackages)) {
+ if (count > 0) {
+ appendSuppressedCount(sb, prefix, count);
+ count = 0;
+ }
+ this.formatEntry(extStackTrace[i], sb, prefix);
+ } else {
+ ++count;
+ }
+ }
+ if (count > 0) {
+ appendSuppressedCount(sb, prefix, count);
+ }
+ }
+ if (commonCount != 0) {
+ sb.append(prefix).append("\t... ").append(commonCount).append(" more").append(EOL);
+ }
+ }
+
+ private void appendSuppressedCount(final StringBuilder sb, final String prefix, final int count) {
+ sb.append(prefix);
+ if (count == 1) {
+ sb.append("\t....").append(EOL);
+ } else {
+ sb.append("\t... suppressed ").append(count).append(" lines").append(EOL);
+ }
+ }
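+
+ // Example (illustrative): with ignorePackages = ["sun.reflect"], a run of three
+ // consecutive sun.reflect.* frames renders as a single "... suppressed 3 lines"
+ // entry (or "...." for a run of one), and commonCount trailing frames shared
+ // with the parent render as "... N more".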
+
+ private void formatEntry(final ExtendedStackTraceElement extStackTraceElement, final StringBuilder sb, final String prefix) {
+ sb.append(prefix);
+ sb.append("\tat ");
+ sb.append(extStackTraceElement);
+ sb.append(EOL);
+ }
+
+ /**
+ * Formats the specified Throwable.
+ *
+ * @param sb
+ * StringBuilder to contain the formatted Throwable.
+ * @param cause
+ * The ThrowableProxy to format.
+ */
+ public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause) {
+ this.formatWrapper(sb, cause, null);
+ }
+
+ /**
+ * Formats the specified Throwable.
+ *
+ * @param sb
+ * StringBuilder to contain the formatted Throwable.
+ * @param cause
+ * The ThrowableProxy to format.
+ * @param packages
+ * The List of packages to be suppressed from the trace.
+ */
+ @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+ public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause, final List<String> packages) {
+ final Throwable caused = cause.getCauseProxy() != null ? cause.getCauseProxy().getThrowable() : null;
+ if (caused != null) {
+ this.formatWrapper(sb, cause.causeProxy);
+ sb.append(WRAPPED_BY_LABEL);
+ }
+ sb.append(cause).append(EOL);
+ this.formatElements(sb, "", cause.commonElementCount,
+ cause.getThrowable().getStackTrace(), cause.extendedStackTrace, packages);
+ }
+
+ public ThrowableProxy getCauseProxy() {
+ return this.causeProxy;
+ }
+
+ /**
+ * Format the Throwable that is the cause of this Throwable.
+ *
+ * @return The formatted Throwable that caused this Throwable.
+ */
+ public String getCauseStackTraceAsString() {
+ return this.getCauseStackTraceAsString(null);
+ }
+
+ /**
+ * Format the Throwable that is the cause of this Throwable.
+ *
+ * @param packages
+ * The List of packages to be suppressed from the trace.
+ * @return The formatted Throwable that caused this Throwable.
+ */
+ public String getCauseStackTraceAsString(final List<String> packages) {
+ final StringBuilder sb = new StringBuilder();
+ if (this.causeProxy != null) {
+ this.formatWrapper(sb, this.causeProxy);
+ sb.append(WRAPPED_BY_LABEL);
+ }
+ sb.append(this.toString());
+ sb.append(EOL);
+ this.formatElements(sb, "", 0, this.throwable.getStackTrace(), this.extendedStackTrace, packages);
+ return sb.toString();
+ }
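+
+ // Example (illustrative): for new A(new B(new C())), the cause-first rendering
+ // prints C first, then "Wrapped by: B", then "Wrapped by: A", i.e. the inverse
+ // of the usual "Caused by:" order.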
+
+ /**
+ * Return the number of elements that are being omitted because they are common with the parent Throwable's stack
+ * trace.
+ *
+ * @return The number of elements omitted from the stack trace.
+ */
+ public int getCommonElementCount() {
+ return this.commonElementCount;
+ }
+
+ /**
+ * Gets the stack trace including packaging information.
+ *
+ * @return The stack trace including packaging information.
+ */
+ public ExtendedStackTraceElement[] getExtendedStackTrace() {
+ return this.extendedStackTrace;
+ }
+
+ /**
+ * Format the stack trace including packaging information.
+ *
+ * @return The formatted stack trace including packaging information.
+ */
+ public String getExtendedStackTraceAsString() {
+ return this.getExtendedStackTraceAsString(null);
+ }
+
+ /**
+ * Format the stack trace including packaging information.
+ *
+ * @param ignorePackages
+ * List of packages to be ignored in the trace.
+ * @return The formatted stack trace including packaging information.
+ */
+ public String getExtendedStackTraceAsString(final List<String> ignorePackages) {
+ final StringBuilder sb = new StringBuilder(this.name);
+ final String msg = this.message;
+ if (msg != null) {
+ sb.append(": ").append(msg);
+ }
+ sb.append(EOL);
+ final StackTraceElement[] causedTrace = this.throwable != null ? this.throwable.getStackTrace() : null;
+ this.formatElements(sb, "", 0, causedTrace, this.extendedStackTrace, ignorePackages);
+ this.formatSuppressed(sb, "\t", this.suppressedProxies, ignorePackages);
+ this.formatCause(sb, "", this.causeProxy, ignorePackages);
+ return sb.toString();
+ }
+
+ public String getLocalizedMessage() {
+ return this.localizedMessage;
+ }
+
+ public String getMessage() {
+ return this.message;
+ }
+
+ /**
+ * Return the FQCN of the Throwable.
+ *
+ * @return The FQCN of the Throwable.
+ */
+ public String getName() {
+ return this.name;
+ }
+
+ public StackTraceElement[] getStackTrace() {
+ return this.throwable == null ? null : this.throwable.getStackTrace();
+ }
+
+ /**
+ * Gets proxies for suppressed exceptions.
+ *
+ * @return proxies for suppressed exceptions.
+ */
+ public ThrowableProxy[] getSuppressedProxies() {
+ return this.suppressedProxies;
+ }
+
+ /**
+ * Format the suppressed Throwables.
+ *
+ * @return The formatted suppressed Throwables.
+ */
+ public String getSuppressedStackTrace() {
+ final ThrowableProxy[] suppressed = this.getSuppressedProxies();
+ if (suppressed == null || suppressed.length == 0) {
+ return Strings.EMPTY;
+ }
+ final StringBuilder sb = new StringBuilder("Suppressed Stack Trace Elements:").append(EOL);
+ for (final ThrowableProxy proxy : suppressed) {
+ sb.append(proxy.getExtendedStackTraceAsString());
+ }
+ return sb.toString();
+ }
+
+ /**
+ * The throwable or null if this object is deserialized from XML or JSON.
+ *
+ * @return The throwable or null if this object is deserialized from XML or JSON.
+ */
+ public Throwable getThrowable() {
+ return this.throwable;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + (this.causeProxy == null ? 0 : this.causeProxy.hashCode());
+ result = prime * result + this.commonElementCount;
+ result = prime * result + (this.extendedStackTrace == null ? 0 : Arrays.hashCode(this.extendedStackTrace));
+ result = prime * result + (this.suppressedProxies == null ? 0 : Arrays.hashCode(this.suppressedProxies));
+ result = prime * result + (this.name == null ? 0 : this.name.hashCode());
+ return result;
+ }
+
+ private boolean ignoreElement(final StackTraceElement element, final List<String> ignorePackages) {
+ final String className = element.getClassName();
+ for (final String pkg : ignorePackages) {
+ if (className.startsWith(pkg)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Loads classes not located via Reflection.getCallerClass.
+ *
+ * @param lastLoader
+ * The ClassLoader that loaded the Class that called this Class.
+ * @param className
+ * The name of the Class.
+ * @return The Class object for the Class or null if it could not be located.
+ */
+ private Class<?> loadClass(final ClassLoader lastLoader, final String className) {
+ // XXX: this is overly complicated
+ Class<?> clazz;
+ if (lastLoader != null) {
+ try {
+ clazz = Loader.initializeClass(className, lastLoader);
+ if (clazz != null) {
+ return clazz;
+ }
+ } catch (final Throwable ignore) {
+ // Ignore exception.
+ }
+ }
+ try {
+ clazz = Loader.loadClass(className);
+ } catch (final ClassNotFoundException | NoClassDefFoundError | SecurityException ignored) {
+ return initializeClass(className);
+ }
+ return clazz;
+ }
+
+ private Class<?> initializeClass(final String className) {
+ try {
+ return Loader.initializeClass(className, this.getClass().getClassLoader());
+ } catch (final ClassNotFoundException | NoClassDefFoundError | SecurityException ignore) {
+ return null;
+ }
+ }
+
+ /**
+ * Construct the CacheEntry from the Class's information.
+ *
+ * @param stackTraceElement
+ * The stack trace element
+ * @param callerClass
+ * The Class.
+ * @param exact
+ * True if the class was obtained via Reflection.getCallerClass.
+ *
+ * @return The CacheEntry.
+ */
+ private CacheEntry toCacheEntry(final StackTraceElement stackTraceElement, final Class<?> callerClass,
+ final boolean exact) {
+ String location = "?";
+ String version = "?";
+ ClassLoader lastLoader = null;
+ if (callerClass != null) {
+ try {
+ final CodeSource source = callerClass.getProtectionDomain().getCodeSource();
+ if (source != null) {
+ final URL locationURL = source.getLocation();
+ if (locationURL != null) {
+ final String str = locationURL.toString().replace('\\', '/');
+ int index = str.lastIndexOf("/");
+ if (index >= 0 && index == str.length() - 1) {
+ // trailing slash: step back to the previous separator so the last segment is kept
+ index = str.lastIndexOf("/", index - 1);
+ }
+ location = str.substring(index + 1);
+ }
+ }
+ } catch (final Exception ex) {
+ // Ignore the exception.
+ }
+ final Package pkg = callerClass.getPackage();
+ if (pkg != null) {
+ final String ver = pkg.getImplementationVersion();
+ if (ver != null) {
+ version = ver;
+ }
+ }
+ lastLoader = callerClass.getClassLoader();
+ }
+ return new CacheEntry(new ExtendedClassInfo(exact, location, version), lastLoader);
+ }
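+
+ // Example (illustrative): a CodeSource location of
+ // "file:/home/es/lib/log4j-core.jar" yields location "log4j-core.jar", while a
+ // directory URL such as "file:/home/es/classes/" yields "classes/" because the
+ // trailing slash forces a step back to the previous path separator.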
+
+ /**
+ * Resolve all the stack entries in this stack trace that are not common with the parent.
+ *
+ * @param stack
+ * The caller's Class stack.
+ * @param map
+ * The cache of CacheEntry objects.
+ * @param rootTrace
+ * The first stack trace to resolve, or null.
+ * @param stackTrace
+ * The stack trace being resolved.
+ * @return The ExtendedStackTraceElement array.
+ */
+ ExtendedStackTraceElement[] toExtendedStackTrace(final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
+ final StackTraceElement[] rootTrace, final StackTraceElement[] stackTrace) {
+ int stackLength;
+ if (rootTrace != null) {
+ int rootIndex = rootTrace.length - 1;
+ int stackIndex = stackTrace.length - 1;
+ while (rootIndex >= 0 && stackIndex >= 0 && rootTrace[rootIndex].equals(stackTrace[stackIndex])) {
+ --rootIndex;
+ --stackIndex;
+ }
+ this.commonElementCount = stackTrace.length - 1 - stackIndex;
+ stackLength = stackIndex + 1;
+ } else {
+ this.commonElementCount = 0;
+ stackLength = stackTrace.length;
+ }
+ final ExtendedStackTraceElement[] extStackTrace = new ExtendedStackTraceElement[stackLength];
+ Class<?> clazz = stack.isEmpty() ? null : stack.peek();
+ ClassLoader lastLoader = null;
+ for (int i = stackLength - 1; i >= 0; --i) {
+ final StackTraceElement stackTraceElement = stackTrace[i];
+ final String className = stackTraceElement.getClassName();
+ // The stack returned from getCurrentStack may be missing entries for java.lang.reflect.Method.invoke()
+ // and its implementation. The Throwable might also contain stack entries that are no longer
+ // present as those methods have returned.
+ ExtendedClassInfo extClassInfo;
+ if (clazz != null && className.equals(clazz.getName())) {
+ final CacheEntry entry = this.toCacheEntry(stackTraceElement, clazz, true);
+ extClassInfo = entry.element;
+ lastLoader = entry.loader;
+ stack.pop();
+ clazz = stack.isEmpty() ? null : stack.peek();
+ } else {
+ final CacheEntry cacheEntry = map.get(className);
+ if (cacheEntry != null) {
+ extClassInfo = cacheEntry.element;
+ if (cacheEntry.loader != null) {
+ lastLoader = cacheEntry.loader;
+ }
+ } else {
+ final CacheEntry entry = this.toCacheEntry(stackTraceElement,
+ this.loadClass(lastLoader, className), false);
+ extClassInfo = entry.element;
+ map.put(className, entry); // key by class name so the cache lookup above can hit
+ if (entry.loader != null) {
+ lastLoader = entry.loader;
+ }
+ }
+ }
+ extStackTrace[i] = new ExtendedStackTraceElement(stackTraceElement, extClassInfo);
+ }
+ return extStackTrace;
+ }
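+
+ // Example (illustrative): if the parent trace ends [..., b, c, d] and the cause
+ // trace is [x, c, d], the shared tail (c, d) is trimmed: commonElementCount
+ // becomes 2 and only [x] is resolved against the caller class stack.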
+
+ @Override
+ public String toString() {
+ final String msg = this.message;
+ return msg != null ? this.name + ": " + msg : this.name;
+ }
+
+ private ThrowableProxy[] toSuppressedProxies(final Throwable thrown, Set<Throwable> suppressedVisited) {
+ try {
+ final Throwable[] suppressed = thrown.getSuppressed();
+ if (suppressed == null) {
+ return EMPTY_THROWABLE_PROXY_ARRAY;
+ }
+ final List<ThrowableProxy> proxies = new ArrayList<>(suppressed.length);
+ if (suppressedVisited == null) {
+ suppressedVisited = new HashSet<>(proxies.size());
+ }
+ for (int i = 0; i < suppressed.length; i++) {
+ final Throwable candidate = suppressed[i];
+ if (!suppressedVisited.contains(candidate)) {
+ suppressedVisited.add(candidate);
+ proxies.add(new ThrowableProxy(candidate, suppressedVisited));
+ }
+ }
+ return proxies.toArray(new ThrowableProxy[proxies.size()]);
+ } catch (final Exception e) {
+ StatusLogger.getLogger().error(e);
+ }
+ return null;
+ }
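+
+ // Typical usage (names illustrative):
+ //   final ThrowableProxy proxy = new ThrowableProxy(new RuntimeException("boom"));
+ //   final String rendered = proxy.getExtendedStackTraceAsString();
+ // Each rendered frame carries its [location:version] packaging data, and the
+ // proxy remains printable after serialization even where the original
+ // Throwable class is not available.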
+}
diff --git a/core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java b/core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java
new file mode 100644
index 0000000000..8edf959a4f
--- /dev/null
+++ b/core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache license, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license for the specific language governing permissions and
+ * limitations under the license.
+ */
+package org.apache.logging.log4j.core.jmx;
+
+import java.lang.management.ManagementFactory;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.MBeanRegistrationException;
+import javax.management.MBeanServer;
+import javax.management.NotCompliantMBeanException;
+import javax.management.ObjectName;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AsyncAppender;
+import org.apache.logging.log4j.core.async.AsyncLoggerConfig;
+import org.apache.logging.log4j.core.async.AsyncLoggerContext;
+import org.apache.logging.log4j.core.async.DaemonThreadFactory;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.core.impl.Log4jContextFactory;
+import org.apache.logging.log4j.core.selector.ContextSelector;
+import org.apache.logging.log4j.core.util.Constants;
+import org.apache.logging.log4j.spi.LoggerContextFactory;
+import org.apache.logging.log4j.status.StatusLogger;
+import org.apache.logging.log4j.util.PropertiesUtil;
+import org.elasticsearch.common.SuppressForbidden;
+
+/**
+ * Creates MBeans to instrument various classes in the log4j class hierarchy.
+ * <p>
+ * All instrumentation for Log4j 2 classes can be disabled by setting system property {@code -Dlog4j2.disable.jmx=true}.
+ * </p>
+ */
+@SuppressForbidden(reason = "copied class to hack around Log4j bug")
+public final class Server {
+
+ /**
+ * The domain part, or prefix ({@value}), of the {@code ObjectName} of all MBeans that instrument Log4J2 components.
+ */
+ public static final String DOMAIN = "org.apache.logging.log4j2";
+ private static final String PROPERTY_DISABLE_JMX = "log4j2.disable.jmx";
+ private static final String PROPERTY_ASYNC_NOTIF = "log4j2.jmx.notify.async";
+ private static final String THREAD_NAME_PREFIX = "log4j2.jmx.notif";
+ private static final StatusLogger LOGGER = StatusLogger.getLogger();
+ static final Executor executor = isJmxDisabled() ? null : createExecutor();
+
+ private Server() {
+ }
+
+ /**
+ * Returns either a {@code null} Executor (causing JMX notifications to be sent from the caller thread) or a daemon
+ * background thread Executor, depending on the value of system property "log4j2.jmx.notify.async". If this
+ * property is not set, a {@code null} Executor is used for web apps to avoid memory leaks and other issues when the
+ * web app is restarted.
+ * @see <a href="https://issues.apache.org/jira/browse/LOG4J2-938">LOG4J2-938</a>
+ */
+ private static ExecutorService createExecutor() {
+ final boolean defaultAsync = !Constants.IS_WEB_APP;
+ final boolean async = PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_ASYNC_NOTIF, defaultAsync);
+ return async ? Executors.newFixedThreadPool(1, new DaemonThreadFactory(THREAD_NAME_PREFIX)) : null;
+ }
+
+ /**
+ * Either returns the specified name as is, or returns a quoted value in which quote, asterisk, question mark,
+ * and backslash are each preceded by a backslash; comma, equals, and colon do not themselves need escaping but
+ * force the value to be quoted.
+ *
+ * @param name the name to escape so it can be used as a value in an {@link ObjectName}.
+ * @return the escaped name
+ */
+ public static String escape(final String name) {
+ final StringBuilder sb = new StringBuilder(name.length() * 2);
+ boolean needsQuotes = false;
+ for (int i = 0; i < name.length(); i++) {
+ final char c = name.charAt(i);
+ switch (c) {
+ case '\\':
+ case '*':
+ case '?':
+ case '\"':
+ // quote, star, question & backslash must be escaped
+ sb.append('\\');
+ needsQuotes = true; // ... and can only appear in quoted value
+ break;
+ case ',':
+ case '=':
+ case ':':
+ // no need to escape these, but value must be quoted
+ needsQuotes = true;
+ break;
+ case '\r':
+ // drop \r characters: \\r gives "invalid escape sequence"
+ continue;
+ case '\n':
+ // replace \n characters with \\n sequence
+ sb.append("\\n");
+ needsQuotes = true;
+ continue;
+ }
+ sb.append(c);
+ }
+ if (needsQuotes) {
+ sb.insert(0, '\"');
+ sb.append('\"');
+ }
+ return sb.toString();
+ }
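+
+ // Examples (illustrative):
+ //   escape("ctx")   -> ctx        (no special characters, returned as is)
+ //   escape("a,b")   -> "a,b"      (comma forces quoting but is not escaped)
+ //   escape("a\"b")  -> "a\"b"     (quote is backslash-escaped and the value quoted)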
+
+ private static boolean isJmxDisabled() {
+ return PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_DISABLE_JMX);
+ }
+
+ public static void reregisterMBeansAfterReconfigure() {
+ // avoid creating Platform MBean Server if JMX disabled
+ if (isJmxDisabled()) {
+ LOGGER.debug("JMX disabled for log4j2. Not registering MBeans.");
+ return;
+ }
+ final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ reregisterMBeansAfterReconfigure(mbs);
+ }
+
+ public static void reregisterMBeansAfterReconfigure(final MBeanServer mbs) {
+ if (isJmxDisabled()) {
+ LOGGER.debug("JMX disabled for log4j2. Not registering MBeans.");
+ return;
+ }
+
+ // now provide instrumentation for the newly configured
+ // LoggerConfigs and Appenders
+ try {
+ final ContextSelector selector = getContextSelector();
+ if (selector == null) {
+ LOGGER.debug("Could not register MBeans: no ContextSelector found.");
+ return;
+ }
+ LOGGER.trace("Reregistering MBeans after reconfigure. Selector={}", selector);
+ final List<LoggerContext> contexts = selector.getLoggerContexts();
+ int i = 0;
+ for (final LoggerContext ctx : contexts) {
+ LOGGER.trace("Reregistering context ({}/{}): '{}' {}", ++i, contexts.size(), ctx.getName(), ctx);
+ // first unregister the context and all nested loggers,
+ // appenders, statusLogger, contextSelector, ringbuffers...
+ unregisterLoggerContext(ctx.getName(), mbs);
+
+ final LoggerContextAdmin mbean = new LoggerContextAdmin(ctx, executor);
+ register(mbs, mbean, mbean.getObjectName());
+
+ if (ctx instanceof AsyncLoggerContext) {
+ final RingBufferAdmin rbmbean = ((AsyncLoggerContext) ctx).createRingBufferAdmin();
+ if (rbmbean.getBufferSize() > 0) {
+ // don't register if Disruptor not started (DefaultConfiguration: config not found)
+ register(mbs, rbmbean, rbmbean.getObjectName());
+ }
+ }
+
+ // register the status logger and the context selector
+ // repeatedly
+ // for each known context: if one context is unregistered,
+ // these MBeans should still be available for the other
+ // contexts.
+ registerStatusLogger(ctx.getName(), mbs, executor);
+ registerContextSelector(ctx.getName(), selector, mbs, executor);
+
+ registerLoggerConfigs(ctx, mbs, executor);
+ registerAppenders(ctx, mbs, executor);
+ }
+ } catch (final Exception ex) {
+ LOGGER.error("Could not register mbeans", ex);
+ }
+ }
+
+ /**
+ * Unregister all log4j MBeans from the platform MBean server.
+ */
+ public static void unregisterMBeans() {
+ if (isJmxDisabled()) {
+ LOGGER.debug("JMX disabled for Log4j2. Not unregistering MBeans.");
+ return;
+ }
+ final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ unregisterMBeans(mbs);
+ }
+
+ /**
+ * Unregister all log4j MBeans from the specified MBean server.
+ *
+ * @param mbs the MBean server to unregister from.
+ */
+ public static void unregisterMBeans(final MBeanServer mbs) {
+ unregisterStatusLogger("*", mbs);
+ unregisterContextSelector("*", mbs);
+ unregisterContexts(mbs);
+ unregisterLoggerConfigs("*", mbs);
+ unregisterAsyncLoggerRingBufferAdmins("*", mbs);
+ unregisterAsyncLoggerConfigRingBufferAdmins("*", mbs);
+ unregisterAppenders("*", mbs);
+ unregisterAsyncAppenders("*", mbs);
+ }
+
+ /**
+ * Returns the {@code ContextSelector} of the current {@code Log4jContextFactory}.
+ *
+ * @return the {@code ContextSelector} of the current {@code Log4jContextFactory}
+ */
+ private static ContextSelector getContextSelector() {
+ final LoggerContextFactory factory = LogManager.getFactory();
+ if (factory instanceof Log4jContextFactory) {
+ final ContextSelector selector = ((Log4jContextFactory) factory).getSelector();
+ return selector;
+ }
+ return null;
+ }
+
+ /**
+ * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s
+ * and {@code Appender}s) from the platform MBean server.
+ *
+ * @param loggerContextName name of the logger context to unregister
+ */
+ public static void unregisterLoggerContext(final String loggerContextName) {
+ if (isJmxDisabled()) {
+ LOGGER.debug("JMX disabled for Log4j2. Not unregistering MBeans.");
+ return;
+ }
+ final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ unregisterLoggerContext(loggerContextName, mbs);
+ }
+
+ /**
+ * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s
+ * and {@code Appender}s) from the specified MBean server.
+ *
+ * @param contextName name of the logger context to unregister
+ * @param mbs the MBean Server to unregister the instrumented objects from
+ */
+ public static void unregisterLoggerContext(final String contextName, final MBeanServer mbs) {
+ final String pattern = LoggerContextAdminMBean.PATTERN;
+ final String search = String.format(pattern, escape(contextName), "*");
+ unregisterAllMatching(search, mbs); // unregister context mbean
+
+ // now unregister all MBeans associated with this logger context
+ unregisterStatusLogger(contextName, mbs);
+ unregisterContextSelector(contextName, mbs);
+ unregisterLoggerConfigs(contextName, mbs);
+ unregisterAppenders(contextName, mbs);
+ unregisterAsyncAppenders(contextName, mbs);
+ unregisterAsyncLoggerRingBufferAdmins(contextName, mbs);
+ unregisterAsyncLoggerConfigRingBufferAdmins(contextName, mbs);
+ }
+
+ private static void registerStatusLogger(final String contextName, final MBeanServer mbs, final Executor executor)
+ throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
+
+ final StatusLoggerAdmin mbean = new StatusLoggerAdmin(contextName, executor);
+ register(mbs, mbean, mbean.getObjectName());
+ }
+
+ private static void registerContextSelector(final String contextName, final ContextSelector selector,
+ final MBeanServer mbs, final Executor executor) throws InstanceAlreadyExistsException,
+ MBeanRegistrationException, NotCompliantMBeanException {
+
+ final ContextSelectorAdmin mbean = new ContextSelectorAdmin(contextName, selector);
+ register(mbs, mbean, mbean.getObjectName());
+ }
+
+ private static void unregisterStatusLogger(final String contextName, final MBeanServer mbs) {
+ final String pattern = StatusLoggerAdminMBean.PATTERN;
+ final String search = String.format(pattern, escape(contextName), "*");
+ unregisterAllMatching(search, mbs);
+ }
+
+ private static void unregisterContextSelector(final String contextName, final MBeanServer mbs) {
+ final String pattern = ContextSelectorAdminMBean.PATTERN;
+ final String search = String.format(pattern, escape(contextName), "*");
+ unregisterAllMatching(search, mbs);
+ }
+
+ private static void unregisterLoggerConfigs(final String contextName, final MBeanServer mbs) {
+ final String pattern = LoggerConfigAdminMBean.PATTERN;
+ final String search = String.format(pattern, escape(contextName), "*");
+ unregisterAllMatching(search, mbs);
+ }
+
+ private static void unregisterContexts(final MBeanServer mbs) {
+ final String pattern = LoggerContextAdminMBean.PATTERN;
+ final String search = String.format(pattern, "*");
+ unregisterAllMatching(search, mbs);
+ }
+
+ private static void unregisterAppenders(final String contextName, final MBeanServer mbs) {
+ final String pattern = AppenderAdminMBean.PATTERN;
+ final String search = String.format(pattern, escape(contextName), "*");
+ unregisterAllMatching(search, mbs);
+ }
+
+ private static void unregisterAsyncAppenders(final String contextName, final MBeanServer mbs) {
+ final String pattern = AsyncAppenderAdminMBean.PATTERN;
+ final String search = String.format(pattern, escape(contextName), "*");
+ unregisterAllMatching(search, mbs);
+ }
+
+ private static void unregisterAsyncLoggerRingBufferAdmins(final String contextName, final MBeanServer mbs) {
+ final String pattern1 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER;
+ final String search1 = String.format(pattern1, escape(contextName));
+ unregisterAllMatching(search1, mbs);
+ }
+
+ private static void unregisterAsyncLoggerConfigRingBufferAdmins(final String contextName, final MBeanServer mbs) {
+ final String pattern2 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER_CONFIG;
+ final String search2 = String.format(pattern2, escape(contextName), "*");
+ unregisterAllMatching(search2, mbs);
+ }
+
+ private static void unregisterAllMatching(final String search, final MBeanServer mbs) {
+ try {
+ final ObjectName pattern = new ObjectName(search);
+ final Set<ObjectName> found = mbs.queryNames(pattern, null);
+ if (found.isEmpty()) {
+ LOGGER.trace("Unregistering but no MBeans found matching '{}'", search);
+ } else {
+ LOGGER.trace("Unregistering {} MBeans: {}", found.size(), found);
+ }
+ for (final ObjectName objectName : found) {
+ mbs.unregisterMBean(objectName);
+ }
+ } catch (final Exception ex) {
+ LOGGER.error("Could not unregister MBeans for " + search, ex);
+ }
+ }
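+
+ // Example (illustrative): a search such as DOMAIN + ":type=*" is compiled into
+ // an ObjectName pattern, every matching registration is removed, and any
+ // exception is logged rather than propagated to the caller.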
+
+ private static void registerLoggerConfigs(final LoggerContext ctx, final MBeanServer mbs, final Executor executor)
+ throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
+
+ final Map<String, LoggerConfig> map = ctx.getConfiguration().getLoggers();
+ for (final String name : map.keySet()) {
+ final LoggerConfig cfg = map.get(name);
+ final LoggerConfigAdmin mbean = new LoggerConfigAdmin(ctx, cfg);
+ register(mbs, mbean, mbean.getObjectName());
+
+ if (cfg instanceof AsyncLoggerConfig) {
+ final AsyncLoggerConfig async = (AsyncLoggerConfig) cfg;
+ final RingBufferAdmin rbmbean = async.createRingBufferAdmin(ctx.getName());
+ register(mbs, rbmbean, rbmbean.getObjectName());
+ }
+ }
+ }
+
+ private static void registerAppenders(final LoggerContext ctx, final MBeanServer mbs, final Executor executor)
+ throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
+
+ final Map<String, Appender> map = ctx.getConfiguration().getAppenders();
+ for (final String name : map.keySet()) {
+ final Appender appender = map.get(name);
+
+ if (appender instanceof AsyncAppender) {
+ final AsyncAppender async = ((AsyncAppender) appender);
+ final AsyncAppenderAdmin mbean = new AsyncAppenderAdmin(ctx.getName(), async);
+ register(mbs, mbean, mbean.getObjectName());
+ } else {
+ final AppenderAdmin mbean = new AppenderAdmin(ctx.getName(), appender);
+ register(mbs, mbean, mbean.getObjectName());
+ }
+ }
+ }
+
+ private static void register(final MBeanServer mbs, final Object mbean, final ObjectName objectName)
+ throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
+ LOGGER.debug("Registering MBean {}", objectName);
+ mbs.registerMBean(mbean, objectName);
+ }
+}
diff --git a/core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java b/core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java
index b0a6122b54..cc853932ef 100644
--- a/core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java
+++ b/core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java
@@ -19,11 +19,11 @@
package org.apache.lucene.analysis.miscellaneous;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.util.CharArraySet;
import java.io.IOException;
diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
index 9bbe08208d..bcf0a2b201 100644
--- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
+++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
@@ -102,7 +102,6 @@ public class MapperQueryParser extends QueryParser {
setLowercaseExpandedTerms(settings.lowercaseExpandedTerms());
setPhraseSlop(settings.phraseSlop());
setDefaultOperator(settings.defaultOperator());
- setFuzzyMinSim(settings.fuzziness().asFloat());
setFuzzyPrefixLength(settings.fuzzyPrefixLength());
setLocale(settings.locale());
}
@@ -114,7 +113,7 @@ public class MapperQueryParser extends QueryParser {
@Override
Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException {
if (fuzzySlop.image.length() == 1) {
- return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim));
+ return getFuzzyQuery(qfield, termImage, Float.toString(settings.fuzziness().asDistance(termImage)));
}
return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1));
}
diff --git a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
index e1ae7b938b..3f5e10cb89 100644
--- a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
+++ b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
@@ -68,14 +68,14 @@ public class StoreRateLimiting {
}
public void setMaxRate(ByteSizeValue rate) {
- if (rate.bytes() <= 0) {
+ if (rate.getBytes() <= 0) {
actualRateLimiter = null;
} else if (actualRateLimiter == null) {
actualRateLimiter = rateLimiter;
- actualRateLimiter.setMBPerSec(rate.mbFrac());
+ actualRateLimiter.setMBPerSec(rate.getMbFrac());
} else {
assert rateLimiter == actualRateLimiter;
- rateLimiter.setMBPerSec(rate.mbFrac());
+ rateLimiter.setMBPerSec(rate.getMbFrac());
}
}
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index 93576c790c..63161a0a18 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -19,6 +19,7 @@
package org.elasticsearch;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -101,7 +102,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
public ElasticsearchException(StreamInput in) throws IOException {
super(in.readOptionalString(), in.readException());
readStackTrace(this, in);
- headers.putAll(in.readMapOfLists());
+ headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
}
/**
@@ -196,7 +197,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
out.writeOptionalString(this.getMessage());
out.writeException(this.getCause());
writeStackTraces(this, out);
- out.writeMapOfLists(headers);
+ out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
}
public static ElasticsearchException readException(StreamInput input, int id) throws IOException {
@@ -632,8 +633,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.repositories.RepositoryMissingException::new, 107),
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
- FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
- org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
+ // 110 used to be FlushNotAllowedEngineException
NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,
org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,
diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
index 772daab2c7..c30662a093 100644
--- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
+++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
@@ -19,12 +19,12 @@
package org.elasticsearch;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;
@@ -39,7 +39,7 @@ import java.util.Set;
public final class ExceptionsHelper {
- private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
+ private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class);
public static RuntimeException convertToRuntime(Exception e) {
if (e instanceof RuntimeException) {
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index c1c93e3892..3de373ec18 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -73,6 +73,8 @@ public class Version {
public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_3_5_ID = 2030599;
public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+ public static final int V_2_4_0_ID = 2040099;
+ public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -83,9 +85,13 @@ public class Version {
public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
public static final int V_5_0_0_alpha5_ID = 5000005;
public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
- public static final int V_5_0_0_alpha6_ID = 5000006;
- public static final Version V_5_0_0_alpha6 = new Version(V_5_0_0_alpha6_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
- public static final Version CURRENT = V_5_0_0_alpha6;
+ public static final int V_5_0_0_beta1_ID = 5000026;
+ public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
+ public static final int V_5_0_0_rc1_ID = 5000051;
+ public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
+ public static final int V_6_0_0_alpha1_ID = 6000001;
+ public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
+ public static final Version CURRENT = V_6_0_0_alpha1;
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -98,8 +104,12 @@ public class Version {
public static Version fromId(int id) {
switch (id) {
- case V_5_0_0_alpha6_ID:
- return V_5_0_0_alpha6;
+ case V_6_0_0_alpha1_ID:
+ return V_6_0_0_alpha1;
+ case V_5_0_0_rc1_ID:
+ return V_5_0_0_rc1;
+ case V_5_0_0_beta1_ID:
+ return V_5_0_0_beta1;
case V_5_0_0_alpha5_ID:
return V_5_0_0_alpha5;
case V_5_0_0_alpha4_ID:
@@ -110,6 +120,8 @@ public class Version {
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
+ case V_2_4_0_ID:
+ return V_2_4_0;
case V_2_3_5_ID:
return V_2_3_5;
case V_2_3_4_ID:
@@ -344,4 +356,9 @@ public class Version {
public boolean isRC() {
return build > 50 && build < 99;
}
+
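+ /**
+ * Returns true for a final release. In the id scheme above, the last two digits encode the build:
+ * alpha1 is 01, beta1 is 26, rc1 is 51 (cf. {@link #isRC()}), and 99 marks a release
+ * (e.g. 2030599 is 2.3.5).
+ */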
+ public boolean isRelease() {
+ return build == 99;
+ }
+
}
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index ca5349661c..a3797c3cb8 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -20,7 +20,6 @@
package org.elasticsearch.action;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -289,6 +288,7 @@ import org.elasticsearch.rest.action.cat.RestSegmentsAction;
import org.elasticsearch.rest.action.cat.RestShardsAction;
import org.elasticsearch.rest.action.cat.RestSnapshotAction;
import org.elasticsearch.rest.action.cat.RestTasksAction;
+import org.elasticsearch.rest.action.cat.RestTemplatesAction;
import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
import org.elasticsearch.rest.action.document.RestBulkAction;
import org.elasticsearch.rest.action.document.RestDeleteAction;
@@ -335,7 +335,7 @@ public class ActionModule extends AbstractModule {
this.actionPlugins = actionPlugins;
actions = setupActions(actionPlugins);
actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
- autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
+ autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, resolver);
destructiveOperations = new DestructiveOperations(settings, clusterSettings);
Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
restController = new RestController(settings, headers);
@@ -604,6 +604,7 @@ public class ActionModule extends AbstractModule {
registerRestHandler(handlers, RestNodeAttrsAction.class);
registerRestHandler(handlers, RestRepositoriesAction.class);
registerRestHandler(handlers, RestSnapshotAction.class);
+ registerRestHandler(handlers, RestTemplatesAction.class);
for (ActionPlugin plugin : actionPlugins) {
for (Class<? extends RestHandler> handler : plugin.getRestHandlers()) {
registerRestHandler(handlers, handler);
@@ -664,4 +665,8 @@ public class ActionModule extends AbstractModule {
}
}
}
+
+ public RestController getRestController() {
+ return restController;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
index 526f8d0706..a304fa60cb 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
@@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -168,31 +169,35 @@ public class TransportClusterAllocationExplainAction
if (node.getId().equals(assignedNodeId)) {
finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
finalExplanation = "the shard is already assigned to this node";
- } else if (hasPendingAsyncFetch &&
- shard.primary() == false &&
- shard.unassigned() &&
- shard.allocatedPostIndexCreate(indexMetaData) &&
- nodeDecision.type() != Decision.Type.YES) {
+ } else if (shard.unassigned() && shard.primary() == false &&
+ shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && nodeDecision.type() != Decision.Type.YES) {
finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
- " decision and the shard's state is still being fetched";
+ " decision";
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
- } else if (hasPendingAsyncFetch &&
- shard.unassigned() &&
- shard.allocatedPostIndexCreate(indexMetaData)) {
+ } else if (shard.unassigned() && shard.primary() == false &&
+ shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && hasPendingAsyncFetch) {
finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
- } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
+ } else if (shard.primary() && shard.unassigned() &&
+ (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE ||
+ shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT)
+ && hasPendingAsyncFetch) {
+ finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
+ finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+ } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
finalExplanation = "the copy of the shard is stale, allocation ids do not match";
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
- } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
+ } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) {
finalExplanation = "there is no copy of the shard available";
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
- } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
+ } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
+ storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
finalExplanation = "the copy of the shard is corrupt";
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
- } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
+ } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
+ storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
finalExplanation = "the copy of the shard cannot be read";
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
} else {
@@ -258,7 +263,7 @@ public class TransportClusterAllocationExplainAction
Float weight = weights.get(node);
IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
- storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()),
+ storeStatus, shard.currentNodeId(), indexMetaData.inSyncAllocationIds(shard.getId()),
allocation.hasPendingAsyncFetch());
explanations.put(node, nodeExplanation);
}
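
The rewritten branch chain above derives its NO decisions from the shard's RecoverySource type instead of the removed allocatedPostIndexCreate() check. A usage sketch of the explain API that exercises this path; the builder and accessor names (prepareAllocationExplain, setIndex, setShard, setPrimary, getExplanation, getFinalDecision) are assumed from the 5.x client and not shown in this diff:

    // ask the active master why primary shard 0 of "my_index" is (un)assigned
    ClusterAllocationExplainResponse response = client.admin().cluster()
            .prepareAllocationExplain()
            .setIndex("my_index")
            .setShard(0)
            .setPrimary(true)
            .get();
    // the explanation carries the FinalDecision computed in the branch chain above
    System.out.println(response.getExplanation().getFinalDecision());
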
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
index 27970f332f..47e0ecd7f7 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
@@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -41,8 +42,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
private String[] indices;
private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
private ClusterHealthStatus waitForStatus;
- private int waitForRelocatingShards = -1;
- private int waitForActiveShards = -1;
+ private boolean waitForNoRelocatingShards = false;
+ private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE;
private String waitForNodes = "";
private Priority waitForEvents = null;
@@ -102,24 +103,52 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
return waitForStatus(ClusterHealthStatus.YELLOW);
}
- public int waitForRelocatingShards() {
- return waitForRelocatingShards;
+ public boolean waitForNoRelocatingShards() {
+ return waitForNoRelocatingShards;
}
- public ClusterHealthRequest waitForRelocatingShards(int waitForRelocatingShards) {
- this.waitForRelocatingShards = waitForRelocatingShards;
+ /**
+ * Sets whether the request should wait for there to be no relocating shards before
+     * retrieving the cluster health status. Defaults to {@code false}, meaning the
+     * operation does not wait for shard relocations to finish. Set to {@code true}
+     * to wait until the number of relocating shards in the cluster is 0.
+ */
+ public ClusterHealthRequest waitForNoRelocatingShards(boolean waitForNoRelocatingShards) {
+ this.waitForNoRelocatingShards = waitForNoRelocatingShards;
return this;
}
- public int waitForActiveShards() {
+ public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
- public ClusterHealthRequest waitForActiveShards(int waitForActiveShards) {
- this.waitForActiveShards = waitForActiveShards;
+ /**
+ * Sets the number of shard copies that must be active across all indices before getting the
+ * health status. Defaults to {@link ActiveShardCount#NONE}, meaning we don't wait on any active shards.
+ * Set this value to {@link ActiveShardCount#ALL} to wait for all shards (primary and
+ * all replicas) to be active across all indices in the cluster. Otherwise, use
+ * {@link ActiveShardCount#from(int)} to set this value to any non-negative integer, up to the
+ * total number of shard copies to wait for.
+ */
+ public ClusterHealthRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
+ if (waitForActiveShards.equals(ActiveShardCount.DEFAULT)) {
+ // the default for cluster health request is 0, not 1
+ this.waitForActiveShards = ActiveShardCount.NONE;
+ } else {
+ this.waitForActiveShards = waitForActiveShards;
+ }
return this;
}
+ /**
+ * A shortcut for {@link #waitForActiveShards(ActiveShardCount)} where the numerical
+ * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
+ * to get the ActiveShardCount.
+ */
+ public ClusterHealthRequest waitForActiveShards(final int waitForActiveShards) {
+ return waitForActiveShards(ActiveShardCount.from(waitForActiveShards));
+ }
+
public String waitForNodes() {
return waitForNodes;
}
@@ -162,8 +191,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
if (in.readBoolean()) {
waitForStatus = ClusterHealthStatus.fromValue(in.readByte());
}
- waitForRelocatingShards = in.readInt();
- waitForActiveShards = in.readInt();
+ waitForNoRelocatingShards = in.readBoolean();
+ waitForActiveShards = ActiveShardCount.readFrom(in);
waitForNodes = in.readString();
if (in.readBoolean()) {
waitForEvents = Priority.readFrom(in);
@@ -188,8 +217,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
out.writeBoolean(true);
out.writeByte(waitForStatus.value());
}
- out.writeInt(waitForRelocatingShards);
- out.writeInt(waitForActiveShards);
+ out.writeBoolean(waitForNoRelocatingShards);
+ waitForActiveShards.writeTo(out);
out.writeString(waitForNodes);
if (waitForEvents == null) {
out.writeBoolean(false);
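
With the two fields above, the wait conditions are now expressed by type rather than by sentinel integers. A minimal sketch of constructing the request directly, using only the setters shown in this hunk (client is any Client instance):

    ClusterHealthRequest healthRequest = new ClusterHealthRequest()
            .waitForNoRelocatingShards(true)            // was waitForRelocatingShards(0)
            .waitForActiveShards(ActiveShardCount.ALL); // was a raw -1/N integer
    ClusterHealthResponse health = client.admin().cluster().health(healthRequest).actionGet();
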
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
index feacbb9511..1a82cf8cb1 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.health;
+import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -64,11 +65,40 @@ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestB
return this;
}
- public ClusterHealthRequestBuilder setWaitForRelocatingShards(int waitForRelocatingShards) {
- request.waitForRelocatingShards(waitForRelocatingShards);
+ /**
+ * Sets whether the request should wait for there to be no relocating shards before
+     * retrieving the cluster health status. Defaults to <code>false</code>, meaning the
+     * operation does not wait for shard relocations to finish. Set to <code>true</code>
+ * to wait until the number of relocating shards in the cluster is 0.
+ */
+    public ClusterHealthRequestBuilder setWaitForNoRelocatingShards(boolean waitForNoRelocatingShards) {
+        request.waitForNoRelocatingShards(waitForNoRelocatingShards);
return this;
}
+ /**
+ * Sets the number of shard copies that must be active before getting the health status.
+ * Defaults to {@link ActiveShardCount#NONE}, meaning we don't wait on any active shards.
+ * Set this value to {@link ActiveShardCount#ALL} to wait for all shards (primary and
+ * all replicas) to be active across all indices in the cluster. Otherwise, use
+ * {@link ActiveShardCount#from(int)} to set this value to any non-negative integer, up to the
+ * total number of shard copies that would exist across all indices in the cluster.
+ */
+ public ClusterHealthRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
+ if (waitForActiveShards.equals(ActiveShardCount.DEFAULT)) {
+ // the default for cluster health is 0, not 1
+ request.waitForActiveShards(ActiveShardCount.NONE);
+ } else {
+ request.waitForActiveShards(waitForActiveShards);
+ }
+ return this;
+ }
+
+ /**
+ * A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
+ * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
+ * to get the ActiveShardCount.
+ */
public ClusterHealthRequestBuilder setWaitForActiveShards(int waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
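
The int overload kept at the end delegates through the request's int shortcut, which wraps the value with ActiveShardCount.from(int), so existing callers keep compiling. Typical builder usage with the new signatures (a node or transport client is assumed):

    ClusterHealthResponse health = client.admin().cluster().prepareHealth()
            .setWaitForNoRelocatingShards(true)
            .setWaitForActiveShards(ActiveShardCount.ALL) // or setWaitForActiveShards(5)
            .get();
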
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
index 7d0a628892..9314079424 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
@@ -19,8 +19,11 @@
package org.elasticsearch.action.admin.cluster.health;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterState;
@@ -105,7 +108,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
listener.onFailure(e);
}
@@ -125,10 +128,10 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
if (request.waitForStatus() == null) {
waitFor--;
}
- if (request.waitForRelocatingShards() == -1) {
+ if (request.waitForNoRelocatingShards() == false) {
waitFor--;
}
- if (request.waitForActiveShards() == -1) {
+ if (request.waitForActiveShards().equals(ActiveShardCount.NONE)) {
waitFor--;
}
if (request.waitForNodes().isEmpty()) {
@@ -203,11 +206,22 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
if (request.waitForStatus() != null && response.getStatus().value() <= request.waitForStatus().value()) {
waitForCounter++;
}
- if (request.waitForRelocatingShards() != -1 && response.getRelocatingShards() <= request.waitForRelocatingShards()) {
+ if (request.waitForNoRelocatingShards() && response.getRelocatingShards() == 0) {
waitForCounter++;
}
- if (request.waitForActiveShards() != -1 && response.getActiveShards() >= request.waitForActiveShards()) {
- waitForCounter++;
+ if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) {
+ ActiveShardCount waitForActiveShards = request.waitForActiveShards();
+ assert waitForActiveShards.equals(ActiveShardCount.DEFAULT) == false :
+ "waitForActiveShards must not be DEFAULT on the request object, instead it should be NONE";
+ if (waitForActiveShards.equals(ActiveShardCount.ALL)
+ && response.getUnassignedShards() == 0
+ && response.getInitializingShards() == 0) {
+                    // if we are waiting for all shards to be active, the number of unassigned and initializing shards must both be 0
+ waitForCounter++;
+ } else if (waitForActiveShards.enoughShardsActive(response.getActiveShards())) {
+ // there are enough active shards to meet the requirements of the request
+ waitForCounter++;
+ }
}
if (request.indices() != null && request.indices().length > 0) {
try {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
index d7ce899792..0ab12fe6c0 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
@@ -37,10 +37,6 @@ import org.elasticsearch.threadpool.ThreadPoolInfo;
import org.elasticsearch.transport.TransportInfo;
import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import static java.util.Collections.unmodifiableMap;
/**
* Node information (static, does not change over time).
@@ -85,8 +81,8 @@ public class NodeInfo extends BaseNodeResponse {
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
- @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
- @Nullable ByteSizeValue totalIndexingBuffer) {
+ @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins,
+ @Nullable IngestInfo ingest, @Nullable ByteSizeValue totalIndexingBuffer) {
super(node);
this.version = version;
this.build = build;
@@ -205,31 +201,14 @@ public class NodeInfo extends BaseNodeResponse {
if (in.readBoolean()) {
settings = Settings.readSettingsFromStream(in);
}
- if (in.readBoolean()) {
- os = OsInfo.readOsInfo(in);
- }
- if (in.readBoolean()) {
- process = ProcessInfo.readProcessInfo(in);
- }
- if (in.readBoolean()) {
- jvm = JvmInfo.readJvmInfo(in);
- }
- if (in.readBoolean()) {
- threadPool = ThreadPoolInfo.readThreadPoolInfo(in);
- }
- if (in.readBoolean()) {
- transport = TransportInfo.readTransportInfo(in);
- }
- if (in.readBoolean()) {
- http = HttpInfo.readHttpInfo(in);
- }
- if (in.readBoolean()) {
- plugins = new PluginsAndModules();
- plugins.readFrom(in);
- }
- if (in.readBoolean()) {
- ingest = new IngestInfo(in);
- }
+ os = in.readOptionalWriteable(OsInfo::new);
+ process = in.readOptionalWriteable(ProcessInfo::new);
+ jvm = in.readOptionalWriteable(JvmInfo::new);
+ threadPool = in.readOptionalWriteable(ThreadPoolInfo::new);
+ transport = in.readOptionalWriteable(TransportInfo::new);
+ http = in.readOptionalWriteable(HttpInfo::new);
+ plugins = in.readOptionalWriteable(PluginsAndModules::new);
+ ingest = in.readOptionalWriteable(IngestInfo::new);
}
@Override
@@ -241,7 +220,7 @@ public class NodeInfo extends BaseNodeResponse {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
- out.writeLong(totalIndexingBuffer.bytes());
+ out.writeLong(totalIndexingBuffer.getBytes());
}
if (settings == null) {
out.writeBoolean(false);
@@ -249,53 +228,13 @@ public class NodeInfo extends BaseNodeResponse {
out.writeBoolean(true);
Settings.writeSettingsToStream(settings, out);
}
- if (os == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- os.writeTo(out);
- }
- if (process == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- process.writeTo(out);
- }
- if (jvm == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- jvm.writeTo(out);
- }
- if (threadPool == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- threadPool.writeTo(out);
- }
- if (transport == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- transport.writeTo(out);
- }
- if (http == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- http.writeTo(out);
- }
- if (plugins == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- plugins.writeTo(out);
- }
- if (ingest == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- ingest.writeTo(out);
- }
+ out.writeOptionalWriteable(os);
+ out.writeOptionalWriteable(process);
+ out.writeOptionalWriteable(jvm);
+ out.writeOptionalWriteable(threadPool);
+ out.writeOptionalWriteable(transport);
+ out.writeOptionalWriteable(http);
+ out.writeOptionalWriteable(plugins);
+ out.writeOptionalWriteable(ingest);
}
}
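
Each removed if/else pair above collapses into a single readOptionalWriteable/writeOptionalWriteable call, which folds the presence flag and the payload together. The pattern in isolation, as a sketch with one hypothetical carrier class and a single optional field, against the 5.x stream API used in this hunk:

    import java.io.IOException;
    import org.elasticsearch.common.Nullable;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;
    import org.elasticsearch.monitor.os.OsInfo;

    public class ExampleNodeSection implements Writeable { // hypothetical carrier class
        @Nullable
        private final OsInfo os;

        public ExampleNodeSection(StreamInput in) throws IOException {
            // reads a boolean flag, then invokes OsInfo(StreamInput) only when the flag is true
            this.os = in.readOptionalWriteable(OsInfo::new);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // writes false for null, otherwise true followed by os.writeTo(out)
            out.writeOptionalWriteable(os);
        }
    }
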
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java
index 3831fd24f3..206dd262ed 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java
@@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.info;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.plugins.PluginInfo;
@@ -34,13 +34,24 @@ import java.util.List;
/**
* Information about plugins and modules
*/
-public class PluginsAndModules implements Streamable, ToXContent {
- private List<PluginInfo> plugins;
- private List<PluginInfo> modules;
+public class PluginsAndModules implements Writeable, ToXContent {
+ private final List<PluginInfo> plugins;
+ private final List<PluginInfo> modules;
- public PluginsAndModules() {
- plugins = new ArrayList<>();
- modules = new ArrayList<>();
+ public PluginsAndModules(List<PluginInfo> plugins, List<PluginInfo> modules) {
+ this.plugins = Collections.unmodifiableList(plugins);
+ this.modules = Collections.unmodifiableList(modules);
+ }
+
+ public PluginsAndModules(StreamInput in) throws IOException {
+ this.plugins = Collections.unmodifiableList(in.readList(PluginInfo::new));
+ this.modules = Collections.unmodifiableList(in.readList(PluginInfo::new));
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeList(plugins);
+ out.writeList(modules);
}
/**
@@ -70,33 +81,6 @@ public class PluginsAndModules implements Streamable, ToXContent {
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- if (plugins.isEmpty() == false || modules.isEmpty() == false) {
- throw new IllegalStateException("instance is already populated");
- }
- int plugins_size = in.readInt();
- for (int i = 0; i < plugins_size; i++) {
- plugins.add(PluginInfo.readFromStream(in));
- }
- int modules_size = in.readInt();
- for (int i = 0; i < modules_size; i++) {
- modules.add(PluginInfo.readFromStream(in));
- }
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeInt(plugins.size());
- for (PluginInfo plugin : getPluginInfos()) {
- plugin.writeTo(out);
- }
- out.writeInt(modules.size());
- for (PluginInfo module : getModuleInfos()) {
- module.writeTo(out);
- }
- }
-
- @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray("plugins");
for (PluginInfo pluginInfo : getPluginInfos()) {
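
The Streamable-to-Writeable migration replaces the mutable two-phase new-then-readFrom() protocol with a StreamInput constructor, which is what allows the plugins and modules lists to become final and unmodifiable. Callers change in step; the NodeInfo hunk above already reads this section the new way:

    // before: plugins = new PluginsAndModules(); plugins.readFrom(in);
    // after:  the constructor reference doubles as the Writeable reader
    plugins = in.readOptionalWriteable(PluginsAndModules::new);
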
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
index 6d50925089..38e91e3424 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
@@ -211,30 +211,16 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
if (in.readBoolean()) {
indices = NodeIndicesStats.readIndicesStats(in);
}
- if (in.readBoolean()) {
- os = OsStats.readOsStats(in);
- }
- if (in.readBoolean()) {
- process = ProcessStats.readProcessStats(in);
- }
- if (in.readBoolean()) {
- jvm = JvmStats.readJvmStats(in);
- }
- if (in.readBoolean()) {
- threadPool = ThreadPoolStats.readThreadPoolStats(in);
- }
- if (in.readBoolean()) {
- fs = new FsInfo(in);
- }
- if (in.readBoolean()) {
- transport = TransportStats.readTransportStats(in);
- }
- if (in.readBoolean()) {
- http = HttpStats.readHttpStats(in);
- }
- breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
- scriptStats = in.readOptionalStreamable(ScriptStats::new);
- discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
+ os = in.readOptionalWriteable(OsStats::new);
+ process = in.readOptionalWriteable(ProcessStats::new);
+ jvm = in.readOptionalWriteable(JvmStats::new);
+ threadPool = in.readOptionalWriteable(ThreadPoolStats::new);
+ fs = in.readOptionalWriteable(FsInfo::new);
+ transport = in.readOptionalWriteable(TransportStats::new);
+ http = in.readOptionalWriteable(HttpStats::new);
+ breaker = in.readOptionalWriteable(AllCircuitBreakerStats::new);
+ scriptStats = in.readOptionalWriteable(ScriptStats::new);
+ discoveryStats = in.readOptionalWriteable(DiscoveryStats::new);
ingestStats = in.readOptionalWriteable(IngestStats::new);
}
@@ -248,51 +234,16 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
out.writeBoolean(true);
indices.writeTo(out);
}
- if (os == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- os.writeTo(out);
- }
- if (process == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- process.writeTo(out);
- }
- if (jvm == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- jvm.writeTo(out);
- }
- if (threadPool == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- threadPool.writeTo(out);
- }
- if (fs == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- fs.writeTo(out);
- }
- if (transport == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- transport.writeTo(out);
- }
- if (http == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- http.writeTo(out);
- }
- out.writeOptionalStreamable(breaker);
- out.writeOptionalStreamable(scriptStats);
- out.writeOptionalStreamable(discoveryStats);
+ out.writeOptionalWriteable(os);
+ out.writeOptionalWriteable(process);
+ out.writeOptionalWriteable(jvm);
+ out.writeOptionalWriteable(threadPool);
+ out.writeOptionalWriteable(fs);
+ out.writeOptionalWriteable(transport);
+ out.writeOptionalWriteable(http);
+ out.writeOptionalWriteable(breaker);
+ out.writeOptionalWriteable(scriptStats);
+ out.writeOptionalWriteable(discoveryStats);
out.writeOptionalWriteable(ingestStats);
}
@@ -318,11 +269,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
builder.endObject();
}
}
-
if (getIndices() != null) {
getIndices().toXContent(builder, params);
}
-
if (getOs() != null) {
getOs().toXContent(builder, params);
}
@@ -350,15 +299,12 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
if (getScriptStats() != null) {
getScriptStats().toXContent(builder, params);
}
-
if (getDiscoveryStats() != null) {
getDiscoveryStats().toXContent(builder, params);
}
-
if (getIngestStats() != null) {
getIngestStats().toXContent(builder, params);
}
-
return builder;
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
index 88162a617a..a2098d1736 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
@@ -268,7 +268,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
- indices = CommonStatsFlags.readCommonStatsFlags(in);
+ indices = new CommonStatsFlags(in);
os = in.readBoolean();
process = in.readBoolean();
jvm = in.readBoolean();
@@ -298,5 +298,4 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
out.writeBoolean(discovery);
out.writeBoolean(ingest);
}
-
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
index dc77a1a6e8..670812a0b4 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
@@ -216,7 +216,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
public void onFailure(Exception e) {
if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
// We haven't yet created the index for the task results so it can't be found.
- listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or stored its results", e,
+ listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e,
request.getTaskId()));
} else {
listener.onFailure(e);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
index 875562ad64..7aade821f8 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
@@ -19,6 +19,9 @@
package org.elasticsearch.action.admin.cluster.reroute;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -28,12 +31,10 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -77,13 +78,13 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
private final ClusterRerouteRequest request;
private final ActionListener<ClusterRerouteResponse> listener;
- private final ESLogger logger;
+ private final Logger logger;
private final AllocationService allocationService;
private volatile ClusterState clusterStateToSend;
private volatile RoutingExplanations explanations;
- ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
- ActionListener<ClusterRerouteResponse> listener) {
+ ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request,
+ ActionListener<ClusterRerouteResponse> listener) {
super(Priority.IMMEDIATE, request, listener);
this.request = request;
this.listener = listener;
@@ -103,21 +104,20 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
@Override
public void onFailure(String source, Exception e) {
- logger.debug("failed to perform [{}]", e, source);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}
@Override
public ClusterState execute(ClusterState currentState) {
- RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),
- request.isRetryFailed());
- ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
- clusterStateToSend = newState;
- explanations = routingResult.explanations();
+ AllocationService.CommandsResult commandsResult =
+ allocationService.reroute(currentState, request.getCommands(), request.explain(), request.isRetryFailed());
+ clusterStateToSend = commandsResult.getClusterState();
+ explanations = commandsResult.explanations();
if (request.dryRun()) {
return currentState;
}
- return newState;
+ return commandsResult.getClusterState();
}
}
}
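
The logging call site in this file moves from the removed ESLogger to the Log4j 2 Logger, whose (Supplier, Throwable) overloads defer building the ParameterizedMessage until the level is actually enabled. The pattern in isolation:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LazyLoggingExample { // illustrative only
        static void logFailure(Logger logger, String source, Exception e) {
            // the lambda, and therefore the message formatting, runs only if DEBUG is enabled
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
        }
    }
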
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
index 4464b5d793..7e77d22243 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.cluster.settings;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
@@ -31,7 +33,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
@@ -148,25 +149,21 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public void onFailure(String source, Exception e) {
//if the reroute fails we only log
- logger.debug("failed to perform [{}]", e, source);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
}
@Override
public ClusterState execute(final ClusterState currentState) {
// now, reroute in case things that require it changed (e.g. number of replicas)
- RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "reroute after cluster update settings");
- if (!routingResult.changed()) {
- return currentState;
- }
- return ClusterState.builder(currentState).routingResult(routingResult).build();
+ return allocationService.reroute(currentState, "reroute after cluster update settings");
}
});
}
@Override
public void onFailure(String source, Exception e) {
- logger.debug("failed to perform [{}]", e, source);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java
index 015134aba7..cc6ca27672 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java
@@ -38,6 +38,8 @@ public class SnapshotsStatusRequest extends MasterNodeRequest<SnapshotsStatusReq
private String[] snapshots = Strings.EMPTY_ARRAY;
+ private boolean ignoreUnavailable;
+
public SnapshotsStatusRequest() {
}
@@ -112,11 +114,33 @@ public class SnapshotsStatusRequest extends MasterNodeRequest<SnapshotsStatusReq
return this;
}
+ /**
+ * Set to <code>true</code> to ignore unavailable snapshots, instead of throwing an exception.
+ * Defaults to <code>false</code>, which means unavailable snapshots cause an exception to be thrown.
+ *
+ * @param ignoreUnavailable whether to ignore unavailable snapshots
+ * @return this request
+ */
+ public SnapshotsStatusRequest ignoreUnavailable(boolean ignoreUnavailable) {
+ this.ignoreUnavailable = ignoreUnavailable;
+ return this;
+ }
+
+ /**
+ * Returns whether the request permits unavailable snapshots to be ignored.
+ *
+ * @return true if the request will ignore unavailable snapshots, false if it will throw an exception on unavailable snapshots
+ */
+ public boolean ignoreUnavailable() {
+ return ignoreUnavailable;
+ }
+
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
repository = in.readString();
snapshots = in.readStringArray();
+ ignoreUnavailable = in.readBoolean();
}
@Override
@@ -124,5 +148,6 @@ public class SnapshotsStatusRequest extends MasterNodeRequest<SnapshotsStatusReq
super.writeTo(out);
out.writeString(repository);
out.writeStringArray(snapshots);
+ out.writeBoolean(ignoreUnavailable);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java
index 3ec1733cea..9e4b4652cc 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java
@@ -74,4 +74,16 @@ public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBui
request.snapshots(ArrayUtils.concat(request.snapshots(), snapshots));
return this;
}
+
+ /**
+ * Set to <code>true</code> to ignore unavailable snapshots, instead of throwing an exception.
+ * Defaults to <code>false</code>, which means unavailable snapshots cause an exception to be thrown.
+ *
+ * @param ignoreUnavailable whether to ignore unavailable snapshots.
+ * @return this builder
+ */
+ public SnapshotsStatusRequestBuilder setIgnoreUnavailable(boolean ignoreUnavailable) {
+ request.ignoreUnavailable(ignoreUnavailable);
+ return this;
+ }
}
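
With the request and builder halves in place, tolerating missing snapshots becomes a one-liner on the client side. A usage sketch; setSnapshots is assumed to be the existing builder method for naming snapshots, and the repository and snapshot names are placeholders:

    SnapshotsStatusResponse status = client.admin().cluster()
            .prepareSnapshotStatus("my_repository")
            .setSnapshots("snap_1", "snap_2")
            .setIgnoreUnavailable(true) // missing snapshots are skipped instead of throwing
            .get();
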
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
index 76fe9510ef..cf00784dc3 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
@@ -214,7 +214,14 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName);
if (snapshotId == null) {
// neither in the current snapshot entries nor found in the repository
- throw new SnapshotMissingException(repositoryName, snapshotName);
+ if (request.ignoreUnavailable()) {
+ // ignoring unavailable snapshots, so skip over
+ logger.debug("snapshot status request ignoring snapshot [{}], not found in repository [{}]",
+ snapshotName, repositoryName);
+ continue;
+ } else {
+ throw new SnapshotMissingException(repositoryName, snapshotName);
+ }
}
SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
index 017b448124..6102f13754 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
@@ -21,11 +21,12 @@ package org.elasticsearch.action.admin.cluster.stats;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;
-
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -39,11 +40,13 @@ import org.elasticsearch.plugins.PluginInfo;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
public class ClusterStatsNodes implements ToXContent {
@@ -54,6 +57,7 @@ public class ClusterStatsNodes implements ToXContent {
private final JvmStats jvm;
private final FsInfo.Path fs;
private final Set<PluginInfo> plugins;
+ private final NetworkTypes networkTypes;
ClusterStatsNodes(List<ClusterStatsNodeResponse> nodeResponses) {
this.versions = new HashSet<>();
@@ -79,13 +83,14 @@ public class ClusterStatsNodes implements ToXContent {
continue;
}
if (nodeResponse.nodeStats().getFs() != null) {
- this.fs.add(nodeResponse.nodeStats().getFs().total());
+ this.fs.add(nodeResponse.nodeStats().getFs().getTotal());
}
}
this.counts = new Counts(nodeInfos);
- this.os = new OsStats(nodeInfos);
+ this.os = new OsStats(nodeInfos, nodeStats);
this.process = new ProcessStats(nodeStats);
this.jvm = new JvmStats(nodeInfos, nodeStats);
+ this.networkTypes = new NetworkTypes(nodeInfos);
}
public Counts getCounts() {
@@ -124,6 +129,7 @@ public class ClusterStatsNodes implements ToXContent {
static final String JVM = "jvm";
static final String FS = "fs";
static final String PLUGINS = "plugins";
+ static final String NETWORK_TYPES = "network_types";
}
@Override
@@ -158,6 +164,10 @@ public class ClusterStatsNodes implements ToXContent {
pluginInfo.toXContent(builder, params);
}
builder.endArray();
+
+ builder.startObject(Fields.NETWORK_TYPES);
+ networkTypes.toXContent(builder, params);
+ builder.endObject();
return builder;
}
@@ -216,11 +226,12 @@ public class ClusterStatsNodes implements ToXContent {
final int availableProcessors;
final int allocatedProcessors;
final ObjectIntHashMap<String> names;
+ final org.elasticsearch.monitor.os.OsStats.Mem mem;
/**
* Build the stats from information about each node.
*/
- private OsStats(List<NodeInfo> nodeInfos) {
+ private OsStats(List<NodeInfo> nodeInfos, List<NodeStats> nodeStatsList) {
this.names = new ObjectIntHashMap<>();
int availableProcessors = 0;
int allocatedProcessors = 0;
@@ -234,6 +245,22 @@ public class ClusterStatsNodes implements ToXContent {
}
this.availableProcessors = availableProcessors;
this.allocatedProcessors = allocatedProcessors;
+
+ long totalMemory = 0;
+ long freeMemory = 0;
+ for (NodeStats nodeStats : nodeStatsList) {
+ if (nodeStats.getOs() != null) {
+ long total = nodeStats.getOs().getMem().getTotal().getBytes();
+ if (total > 0) {
+ totalMemory += total;
+ }
+ long free = nodeStats.getOs().getMem().getFree().getBytes();
+ if (free > 0) {
+ freeMemory += free;
+ }
+ }
+ }
+ this.mem = new org.elasticsearch.monitor.os.OsStats.Mem(totalMemory, freeMemory);
}
public int getAvailableProcessors() {
@@ -244,6 +271,10 @@ public class ClusterStatsNodes implements ToXContent {
return allocatedProcessors;
}
+ public org.elasticsearch.monitor.os.OsStats.Mem getMem() {
+ return mem;
+ }
+
static final class Fields {
static final String AVAILABLE_PROCESSORS = "available_processors";
static final String ALLOCATED_PROCESSORS = "allocated_processors";
@@ -264,6 +295,7 @@ public class ClusterStatsNodes implements ToXContent {
builder.endObject();
}
builder.endArray();
+ mem.toXContent(builder, params);
return builder;
}
}
@@ -391,8 +423,8 @@ public class ClusterStatsNodes implements ToXContent {
}
maxUptime = Math.max(maxUptime, js.getUptime().millis());
if (js.getMem() != null) {
- heapUsed += js.getMem().getHeapUsed().bytes();
- heapMax += js.getMem().getHeapMax().bytes();
+ heapUsed += js.getMem().getHeapUsed().getBytes();
+ heapMax += js.getMem().getHeapMax().getBytes();
}
}
this.threads = threads;
@@ -506,4 +538,43 @@ public class ClusterStatsNodes implements ToXContent {
return vmVersion.hashCode();
}
}
+
+ static class NetworkTypes implements ToXContent {
+
+ private final Map<String, AtomicInteger> transportTypes;
+ private final Map<String, AtomicInteger> httpTypes;
+
+ private NetworkTypes(final List<NodeInfo> nodeInfos) {
+ final Map<String, AtomicInteger> transportTypes = new HashMap<>();
+ final Map<String, AtomicInteger> httpTypes = new HashMap<>();
+ for (final NodeInfo nodeInfo : nodeInfos) {
+ final Settings settings = nodeInfo.getSettings();
+ final String transportType =
+ settings.get(NetworkModule.TRANSPORT_TYPE_KEY, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
+ final String httpType =
+ settings.get(NetworkModule.HTTP_TYPE_KEY, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
+ transportTypes.computeIfAbsent(transportType, k -> new AtomicInteger()).incrementAndGet();
+ httpTypes.computeIfAbsent(httpType, k -> new AtomicInteger()).incrementAndGet();
+ }
+ this.transportTypes = Collections.unmodifiableMap(transportTypes);
+ this.httpTypes = Collections.unmodifiableMap(httpTypes);
+ }
+
+ @Override
+ public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+ builder.startObject("transport_types");
+ for (final Map.Entry<String, AtomicInteger> entry : transportTypes.entrySet()) {
+ builder.field(entry.getKey(), entry.getValue().get());
+ }
+ builder.endObject();
+ builder.startObject("http_types");
+ for (final Map.Entry<String, AtomicInteger> entry : httpTypes.entrySet()) {
+ builder.field(entry.getKey(), entry.getValue().get());
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ }
+
}
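
NetworkTypes tallies transport and HTTP implementations with computeIfAbsent plus AtomicInteger, which avoids the usual get/null-check/put sequence. The counting idiom in plain Java, independent of any Elasticsearch types:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicInteger;

    public class TypeCounter {
        public static void main(String[] args) {
            Map<String, AtomicInteger> counts = new HashMap<>();
            for (String type : new String[] {"netty4", "netty4", "local"}) {
                // creates a zeroed counter on the first sighting of a key, then increments it
                counts.computeIfAbsent(type, k -> new AtomicInteger()).incrementAndGet();
            }
            System.out.println(counts); // e.g. {netty4=2, local=1} (iteration order unspecified)
        }
    }
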
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index bc89495237..3eb7327383 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -91,8 +91,8 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
- NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false, false);
- NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
+ NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
+ NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
List<ShardStats> shardsStats = new ArrayList<>();
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java
index 42d34d6ebd..ab723339e2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java
@@ -21,29 +21,22 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.cluster.metadata.AliasAction;
+import java.util.List;
+
/**
* Cluster state update request that allows to add or remove aliases
*/
public class IndicesAliasesClusterStateUpdateRequest extends ClusterStateUpdateRequest<IndicesAliasesClusterStateUpdateRequest> {
+ private final List<AliasAction> actions;
- AliasAction[] actions;
-
- public IndicesAliasesClusterStateUpdateRequest() {
-
+ public IndicesAliasesClusterStateUpdateRequest(List<AliasAction> actions) {
+ this.actions = actions;
}
/**
* Returns the alias actions to be performed
*/
- public AliasAction[] actions() {
+ public List<AliasAction> actions() {
return actions;
}
-
- /**
- * Sets the alias actions to be executed
- */
- public IndicesAliasesClusterStateUpdateRequest actions(AliasAction[] actions) {
- this.actions = actions;
- return this;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
index fa79a213bf..63493210f7 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
@@ -20,6 +20,8 @@
package org.elasticsearch.action.admin.indices.alias;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+
+import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.AliasesRequest;
import org.elasticsearch.action.CompositeIndicesRequest;
@@ -27,30 +29,41 @@ import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.AliasAction;
-import org.elasticsearch.cluster.metadata.AliasAction.Type;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParseFieldMatcherSupplier;
+import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
-import java.util.Locale;
import java.util.Map;
+import java.util.Objects;
+import java.util.function.Supplier;
import static org.elasticsearch.action.ValidateActions.addValidationError;
-import static org.elasticsearch.cluster.metadata.AliasAction.readAliasAction;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
/**
* A request to add/remove aliases for one or more indices.
*/
public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> implements CompositeIndicesRequest {
-
private List<AliasActions> allAliasActions = new ArrayList<>();
//indices options that require every specified index to exist, expand wildcards only to open indices and
@@ -61,94 +74,317 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
}
- /*
- * Aliases can be added by passing multiple indices to the Request and
- * deleted by passing multiple indices and aliases. They are expanded into
- * distinct AliasAction instances when the request is processed. This class
- * holds the AliasAction and in addition the arrays or alias names and
- * indices that is later used to create the final AliasAction instances.
+ /**
+     * Request to take one or more actions on one or more index and alias combinations.
*/
- public static class AliasActions implements AliasesRequest {
- private String[] indices = Strings.EMPTY_ARRAY;
- private String[] aliases = Strings.EMPTY_ARRAY;
- private AliasAction aliasAction;
+ public static class AliasActions implements AliasesRequest, Writeable {
+ public enum Type {
+ ADD((byte) 0),
+ REMOVE((byte) 1),
+ REMOVE_INDEX((byte) 2);
+
+ private final byte value;
+
+ Type(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return value;
+ }
- public AliasActions(AliasAction.Type type, String[] indices, String[] aliases) {
- aliasAction = new AliasAction(type);
- indices(indices);
- aliases(aliases);
+ public static Type fromValue(byte value) {
+ switch (value) {
+ case 0: return ADD;
+ case 1: return REMOVE;
+ case 2: return REMOVE_INDEX;
+ default: throw new IllegalArgumentException("No type for action [" + value + "]");
+ }
+ }
}
- public AliasActions(AliasAction.Type type, String index, String alias) {
- aliasAction = new AliasAction(type);
- indices(index);
- aliases(alias);
+ /**
+         * Build a new {@link AliasActions} to add aliases.
+ */
+ public static AliasActions add() {
+ return new AliasActions(AliasActions.Type.ADD);
+ }
+ /**
+         * Build a new {@link AliasActions} to remove aliases.
+ */
+ public static AliasActions remove() {
+ return new AliasActions(AliasActions.Type.REMOVE);
+ }
+ /**
+         * Build a new {@link AliasActions} to remove an index.
+ */
+ public static AliasActions removeIndex() {
+ return new AliasActions(AliasActions.Type.REMOVE_INDEX);
+ }
+ private static ObjectParser<AliasActions, ParseFieldMatcherSupplier> parser(String name, Supplier<AliasActions> supplier) {
+ ObjectParser<AliasActions, ParseFieldMatcherSupplier> parser = new ObjectParser<>(name, supplier);
+ parser.declareString((action, index) -> {
+ if (action.indices() != null) {
+ throw new IllegalArgumentException("Only one of [index] and [indices] is supported");
+ }
+ action.index(index);
+ }, new ParseField("index"));
+ parser.declareStringArray(fromList(String.class, (action, indices) -> {
+ if (action.indices() != null) {
+ throw new IllegalArgumentException("Only one of [index] and [indices] is supported");
+ }
+ action.indices(indices);
+ }), new ParseField("indices"));
+ parser.declareString((action, alias) -> {
+ if (action.aliases() != null && action.aliases().length != 0) {
+ throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported");
+ }
+ action.alias(alias);
+ }, new ParseField("alias"));
+ parser.declareStringArray(fromList(String.class, (action, aliases) -> {
+ if (action.aliases() != null && action.aliases().length != 0) {
+ throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported");
+ }
+ action.aliases(aliases);
+ }), new ParseField("aliases"));
+ return parser;
}
- AliasActions(AliasAction.Type type, String[] index, String alias) {
- aliasAction = new AliasAction(type);
- indices(index);
- aliases(alias);
+ private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> ADD_PARSER = parser("add", AliasActions::add);
+ static {
+ ADD_PARSER.declareObject(AliasActions::filter, (parser, m) -> {
+ try {
+ return parser.mapOrdered();
+ } catch (IOException e) {
+ throw new ParsingException(parser.getTokenLocation(), "Problems parsing [filter]", e);
+ }
+ }, new ParseField("filter"));
+ // Since we need to support numbers AND strings here we have to use ValueType.INT.
+ ADD_PARSER.declareField(AliasActions::routing, p -> p.text(), new ParseField("routing"), ValueType.INT);
+ ADD_PARSER.declareField(AliasActions::indexRouting, p -> p.text(), new ParseField("index_routing"), ValueType.INT);
+ ADD_PARSER.declareField(AliasActions::searchRouting, p -> p.text(), new ParseField("search_routing"), ValueType.INT);
+ }
+ private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> REMOVE_PARSER = parser("remove", AliasActions::remove);
+ private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> REMOVE_INDEX_PARSER = parser("remove_index",
+ AliasActions::removeIndex);
+
+ /**
+         * Parser for any one {@link AliasActions}.
+ */
+ public static final ConstructingObjectParser<AliasActions, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
+ "alias_action", a -> {
+                // Take the first action and complain if there is more than one action
+ AliasActions action = null;
+ for (Object o : a) {
+ if (o != null) {
+ if (action == null) {
+ action = (AliasActions) o;
+ } else {
+ throw new IllegalArgumentException("Too many operations declared in on opeation entry");
+ }
+ }
+ }
+ return action;
+ });
+ static {
+ PARSER.declareObject(optionalConstructorArg(), ADD_PARSER, new ParseField("add"));
+ PARSER.declareObject(optionalConstructorArg(), REMOVE_PARSER, new ParseField("remove"));
+ PARSER.declareObject(optionalConstructorArg(), REMOVE_INDEX_PARSER, new ParseField("remove_index"));
}
- public AliasActions(AliasAction action) {
- this.aliasAction = action;
- indices(action.index());
- aliases(action.alias());
+ private final AliasActions.Type type;
+ private String[] indices;
+ private String[] aliases = Strings.EMPTY_ARRAY;
+ private String filter;
+ private String routing;
+ private String indexRouting;
+ private String searchRouting;
+
+ AliasActions(AliasActions.Type type) {
+ this.type = type;
}
- public AliasActions(Type type, String index, String[] aliases) {
- aliasAction = new AliasAction(type);
- indices(index);
- aliases(aliases);
+ /**
+ * Read from a stream.
+ */
+ public AliasActions(StreamInput in) throws IOException {
+ type = AliasActions.Type.fromValue(in.readByte());
+ indices = in.readStringArray();
+ aliases = in.readStringArray();
+ filter = in.readOptionalString();
+ routing = in.readOptionalString();
+ searchRouting = in.readOptionalString();
+ indexRouting = in.readOptionalString();
}
- public AliasActions() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeByte(type.value());
+ out.writeStringArray(indices);
+ out.writeStringArray(aliases);
+ out.writeOptionalString(filter);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(searchRouting);
+ out.writeOptionalString(indexRouting);
}
- public AliasActions filter(Map<String, Object> filter) {
- aliasAction.filter(filter);
+ /**
+ * Validate that the action is sane. Called when the action is added to the request because actions can be invalid while being
+ * built.
+ */
+ void validate() {
+ if (indices == null) {
+ throw new IllegalArgumentException("One of [index] or [indices] is required");
+ }
+ if (type != AliasActions.Type.REMOVE_INDEX && (aliases == null || aliases.length == 0)) {
+ throw new IllegalArgumentException("One of [alias] or [aliases] is required");
+ }
+ }
+
+ /**
+ * Type of the action to perform.
+ */
+ public AliasActions.Type actionType() {
+ return type;
+ }
+
+ @Override
+ public AliasActions indices(String... indices) {
+ if (indices == null || indices.length == 0) {
+ throw new IllegalArgumentException("[indices] can't be empty");
+ }
+ for (String index : indices) {
+ if (false == Strings.hasLength(index)) {
+ throw new IllegalArgumentException("[indices] can't contain empty string");
+ }
+ }
+ this.indices = indices;
return this;
}
- public AliasActions filter(QueryBuilder filter) {
- aliasAction.filter(filter);
+ /**
+ * Set the index this action is operating on.
+ */
+ public AliasActions index(String index) {
+ if (false == Strings.hasLength(index)) {
+ throw new IllegalArgumentException("[index] can't be empty string");
+ }
+ this.indices = new String[] {index};
return this;
}
- public Type actionType() {
- return aliasAction.actionType();
+ /**
+ * Aliases to use with this action.
+ */
+ @Override
+ public AliasActions aliases(String... aliases) {
+ if (type == AliasActions.Type.REMOVE_INDEX) {
+ throw new IllegalArgumentException("[aliases] is unsupported for [" + type + "]");
+ }
+ if (aliases == null || aliases.length == 0) {
+ throw new IllegalArgumentException("[aliases] can't be empty");
+ }
+ for (String alias : aliases) {
+ if (false == Strings.hasLength(alias)) {
+ throw new IllegalArgumentException("[aliases] can't contain empty string");
+ }
+ }
+ this.aliases = aliases;
+ return this;
}
- public void routing(String routing) {
- aliasAction.routing(routing);
+ /**
+ * Set the alias this action is operating on.
+ */
+ public AliasActions alias(String alias) {
+ if (type == AliasActions.Type.REMOVE_INDEX) {
+ throw new IllegalArgumentException("[alias] is unsupported for [" + type + "]");
+ }
+ if (false == Strings.hasLength(alias)) {
+ throw new IllegalArgumentException("[alias] can't be empty string");
+ }
+ this.aliases = new String[] {alias};
+ return this;
}
- public void searchRouting(String searchRouting) {
- aliasAction.searchRouting(searchRouting);
+ /**
+ * Set the default routing.
+ */
+ public AliasActions routing(String routing) {
+ if (type != AliasActions.Type.ADD) {
+ throw new IllegalArgumentException("[routing] is unsupported for [" + type + "]");
+ }
+ this.routing = routing;
+ return this;
}
- public void indexRouting(String indexRouting) {
- aliasAction.indexRouting(indexRouting);
+ public String searchRouting() {
+ return searchRouting == null ? routing : searchRouting;
}
- public AliasActions filter(String filter) {
- aliasAction.filter(filter);
+ public AliasActions searchRouting(String searchRouting) {
+ if (type != AliasActions.Type.ADD) {
+ throw new IllegalArgumentException("[search_routing] is unsupported for [" + type + "]");
+ }
+ this.searchRouting = searchRouting;
return this;
}
- @Override
- public AliasActions indices(String... indices) {
- this.indices = indices;
+ public String indexRouting() {
+ return indexRouting == null ? routing : indexRouting;
+ }
+
+ public AliasActions indexRouting(String indexRouting) {
+ if (type != AliasActions.Type.ADD) {
+ throw new IllegalArgumentException("[index_routing] is unsupported for [" + type + "]");
+ }
+ this.indexRouting = indexRouting;
return this;
}
- @Override
- public AliasActions aliases(String... aliases) {
- this.aliases = aliases;
+ public String filter() {
+ return filter;
+ }
+
+ public AliasActions filter(String filter) {
+ if (type != AliasActions.Type.ADD) {
+ throw new IllegalArgumentException("[filter] is unsupported for [" + type + "]");
+ }
+ this.filter = filter;
return this;
}
+ public AliasActions filter(Map<String, Object> filter) {
+ if (filter == null || filter.isEmpty()) {
+ this.filter = null;
+ return this;
+ }
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(filter);
+ this.filter = builder.string();
+ return this;
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
+ }
+ }
+
+ public AliasActions filter(QueryBuilder filter) {
+ if (filter == null) {
+ this.filter = null;
+ return this;
+ }
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ filter.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.close();
+ this.filter = builder.string();
+ return this;
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
+ }
+ }
+
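A minimal sketch of attaching a query-DSL filter through the overload above; QueryBuilders.termQuery is the standard query factory, and the index, alias, and field names are illustrative:

    AliasActions filtered = AliasActions.add()
        .index("logs")
        .alias("errors-only")
        .filter(QueryBuilders.termQuery("level", "error"));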
@Override
public String[] aliases() {
return aliases;
@@ -157,7 +393,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
@Override
public boolean expandAliasesWildcards() {
//remove operations support wildcards among aliases, add operations don't
- return aliasAction.actionType() == Type.REMOVE;
+ return type == Type.REMOVE;
}
@Override
@@ -170,10 +406,6 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return INDICES_OPTIONS;
}
- public AliasAction aliasAction() {
- return aliasAction;
- }
-
public String[] concreteAliases(MetaData metaData, String concreteIndex) {
if (expandAliasesWildcards()) {
//for DELETE we expand the aliases
@@ -191,83 +423,48 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return aliases;
}
}
- public AliasActions readFrom(StreamInput in) throws IOException {
- indices = in.readStringArray();
- aliases = in.readStringArray();
- aliasAction = readAliasAction(in);
- return this;
- }
- public void writeTo(StreamOutput out) throws IOException {
- out.writeStringArray(indices);
- out.writeStringArray(aliases);
- this.aliasAction.writeTo(out);
+ @Override
+ public String toString() {
+ return "AliasActions["
+ + "type=" + type
+ + ",indices=" + Arrays.toString(indices)
+ + ",aliases=" + Arrays.deepToString(aliases)
+ + ",filter=" + filter
+ + ",routing=" + routing
+ + ",indexRouting=" + indexRouting
+ + ",searchRouting=" + searchRouting
+ + "]";
}
- }
-
- /**
- * Adds an alias to the index.
- * @param alias The alias
- * @param indices The indices
- */
- public IndicesAliasesRequest addAlias(String alias, String... indices) {
- addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias));
- return this;
- }
-
-
- public void addAliasAction(AliasActions aliasAction) {
- allAliasActions.add(aliasAction);
- }
-
-
- public IndicesAliasesRequest addAliasAction(AliasAction action) {
- addAliasAction(new AliasActions(action));
- return this;
- }
-
- /**
- * Adds an alias to the index.
- * @param alias The alias
- * @param filter The filter
- * @param indices The indices
- */
- public IndicesAliasesRequest addAlias(String alias, Map<String, Object> filter, String... indices) {
- addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter));
- return this;
- }
-
- /**
- * Adds an alias to the index.
- * @param alias The alias
- * @param filterBuilder The filter
- * @param indices The indices
- */
- public IndicesAliasesRequest addAlias(String alias, QueryBuilder filterBuilder, String... indices) {
- addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filterBuilder));
- return this;
- }
+ // equals and hashCode are implemented to make round-trip testing easy
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || obj.getClass() != getClass()) {
+ return false;
+ }
+ AliasActions other = (AliasActions) obj;
+ return Objects.equals(type, other.type)
+ && Arrays.equals(indices, other.indices)
+ && Arrays.equals(aliases, other.aliases)
+ && Objects.equals(filter, other.filter)
+ && Objects.equals(routing, other.routing)
+ && Objects.equals(indexRouting, other.indexRouting)
+ && Objects.equals(searchRouting, other.searchRouting);
+ }
- /**
- * Removes an alias to the index.
- *
- * @param indices The indices
- * @param aliases The aliases
- */
- public IndicesAliasesRequest removeAlias(String[] indices, String... aliases) {
- addAliasAction(new AliasActions(AliasAction.Type.REMOVE, indices, aliases));
- return this;
+ @Override
+ public int hashCode() {
+ return Objects.hash(type, indices, aliases, filter, routing, indexRouting, searchRouting);
+ }
}
/**
- * Removes an alias to the index.
- *
- * @param index The index
- * @param aliases The aliases
+ * Add the action to this request and validate it.
*/
- public IndicesAliasesRequest removeAlias(String index, String... aliases) {
- addAliasAction(new AliasActions(AliasAction.Type.REMOVE, index, aliases));
+ public IndicesAliasesRequest addAliasAction(AliasActions aliasAction) {
+ aliasAction.validate();
+ allAliasActions.add(aliasAction);
return this;
}
@@ -285,50 +482,20 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
if (allAliasActions.isEmpty()) {
return addValidationError("Must specify at least one alias action", validationException);
}
- for (AliasActions aliasAction : allAliasActions) {
- if (CollectionUtils.isEmpty(aliasAction.aliases)) {
- validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
- + "]: Property [alias/aliases] is either missing or null", validationException);
- } else {
- for (String alias : aliasAction.aliases) {
- if (!Strings.hasText(alias)) {
- validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
- + "]: [alias/aliases] may not be empty string", validationException);
- }
- }
- }
- if (CollectionUtils.isEmpty(aliasAction.indices)) {
- validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
- + "]: Property [index/indices] is either missing or null", validationException);
- } else {
- for (String index : aliasAction.indices) {
- if (!Strings.hasText(index)) {
- validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
- + "]: [index/indices] may not be empty string", validationException);
- }
- }
- }
- }
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
- int size = in.readVInt();
- for (int i = 0; i < size; i++) {
- allAliasActions.add(readAliasActions(in));
- }
+ allAliasActions = in.readList(AliasActions::new);
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
- out.writeVInt(allAliasActions.size());
- for (AliasActions aliasAction : allAliasActions) {
- aliasAction.writeTo(out);
- }
+ out.writeList(allAliasActions);
writeTimeout(out);
}
@@ -336,11 +503,6 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return INDICES_OPTIONS;
}
- private static AliasActions readAliasActions(StreamInput in) throws IOException {
- AliasActions actions = new AliasActions();
- return actions.readFrom(in);
- }
-
@Override
public List<? extends IndicesRequest> subRequests() {
return allAliasActions;
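Because addAliasAction calls validate() eagerly, a malformed action now fails when the request is built instead of on the master node. A hedged usage sketch with illustrative index and alias names:

    IndicesAliasesRequest request = new IndicesAliasesRequest();
    request.addAliasAction(AliasActions.add().index("logs-2016-10").alias("logs"));
    request.addAliasAction(AliasActions.remove().index("logs-2016-09").alias("logs"));
    // An add() with an alias but no index would throw here:
    // "One of [index] or [indices] is required"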
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
index df44d8e97f..53c8c35de6 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
@@ -22,15 +22,15 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.index.query.QueryBuilder;
import java.util.Map;
/**
- *
+ * Builder for request to modify many aliases at once.
*/
-public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {
+public class IndicesAliasesRequestBuilder
+ extends AcknowledgedRequestBuilder<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {
public IndicesAliasesRequestBuilder(ElasticsearchClient client, IndicesAliasesAction action) {
super(client, action, new IndicesAliasesRequest());
@@ -43,7 +43,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias) {
- request.addAlias(alias, index);
+ request.addAliasAction(AliasActions.add().index(index).alias(alias));
return this;
}
@@ -54,7 +54,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias) {
- request.addAlias(alias, indices);
+ request.addAliasAction(AliasActions.add().indices(indices).alias(alias));
return this;
}
@@ -66,8 +66,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, String filter) {
- AliasActions action = new AliasActions(AliasAction.Type.ADD, index, alias).filter(filter);
- request.addAliasAction(action);
+ request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filter));
return this;
}
@@ -79,8 +78,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, String filter) {
- AliasActions action = new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter);
- request.addAliasAction(action);
+ request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filter));
return this;
}
@@ -92,7 +90,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias, Map<String, Object> filter) {
- request.addAlias(alias, filter, indices);
+ request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filter));
return this;
}
@@ -104,7 +102,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, Map<String, Object> filter) {
- request.addAlias(alias, filter, index);
+ request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filter));
return this;
}
@@ -116,7 +114,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filterBuilder The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, QueryBuilder filterBuilder) {
- request.addAlias(alias, filterBuilder, indices);
+ request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filterBuilder));
return this;
}
@@ -128,7 +126,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filterBuilder The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, QueryBuilder filterBuilder) {
- request.addAlias(alias, filterBuilder, index);
+ request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filterBuilder));
return this;
}
@@ -139,7 +137,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder removeAlias(String index, String alias) {
- request.removeAlias(index, alias);
+ request.addAliasAction(AliasActions.remove().index(index).alias(alias));
return this;
}
@@ -150,7 +148,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String[] indices, String... aliases) {
- request.removeAlias(indices, aliases);
+ request.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases));
return this;
}
@@ -161,17 +159,12 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String index, String[] aliases) {
- request.removeAlias(index, aliases);
+ request.addAliasAction(AliasActions.remove().index(index).aliases(aliases));
return this;
}
- /**
- * Adds an alias action to the request.
- *
- * @param aliasAction The alias action
- */
- public IndicesAliasesRequestBuilder addAliasAction(AliasAction aliasAction) {
- request.addAliasAction(aliasAction);
+ public IndicesAliasesRequestBuilder removeIndex(String index) {
+ request.addAliasAction(AliasActions.removeIndex().index(index));
return this;
}
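With removeIndex added, the builder covers all three action types. A sketch of an atomic alias swap, assuming the usual client.admin().indices().prepareAliases() entry point (index and alias names illustrative):

    client.admin().indices().prepareAliases()
        .addAlias("logs-new", "logs")
        .removeAlias("logs-old", "logs")
        .execute().actionGet();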
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
index 6b44609fe1..44de63c028 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
@@ -43,6 +43,8 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import static java.util.Collections.unmodifiableList;
+
/**
* Add/remove aliases action
*/
@@ -86,31 +88,38 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
//Expand the indices names
List<AliasActions> actions = request.aliasActions();
List<AliasAction> finalActions = new ArrayList<>();
- boolean hasOnlyDeletesButNoneCanBeDone = true;
+
+ // Resolve all the AliasActions into AliasAction instances and gather all the aliases
Set<String> aliases = new HashSet<>();
for (AliasActions action : actions) {
- //expand indices
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices());
- //collect the aliases
Collections.addAll(aliases, action.aliases());
for (String index : concreteIndices) {
- for (String alias : action.concreteAliases(state.metaData(), index)) {
- AliasAction finalAction = new AliasAction(action.aliasAction());
- finalAction.index(index);
- finalAction.alias(alias);
- finalActions.add(finalAction);
- //if there is only delete requests, none will be added if the types do not map to any existing type
- hasOnlyDeletesButNoneCanBeDone = false;
+ switch (action.actionType()) {
+ case ADD:
+ for (String alias : action.concreteAliases(state.metaData(), index)) {
+ finalActions.add(new AliasAction.Add(index, alias, action.filter(), action.indexRouting(), action.searchRouting()));
+ }
+ break;
+ case REMOVE:
+ for (String alias : action.concreteAliases(state.metaData(), index)) {
+ finalActions.add(new AliasAction.Remove(index, alias));
+ }
+ break;
+ case REMOVE_INDEX:
+ finalActions.add(new AliasAction.RemoveIndex(index));
+ break;
+ default:
+ throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]");
}
}
}
- if (hasOnlyDeletesButNoneCanBeDone && actions.size() != 0) {
+ if (finalActions.isEmpty() && false == actions.isEmpty()) {
throw new AliasesNotFoundException(aliases.toArray(new String[aliases.size()]));
}
request.aliasActions().clear();
- IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest()
- .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
- .actions(finalActions.toArray(new AliasAction[finalActions.size()]));
+ IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(unmodifiableList(finalActions))
+ .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout());
indexAliasesService.indicesAliases(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java
index c67c036023..2d1ba22b98 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java
@@ -292,7 +292,7 @@ public class DetailAnalyzeResponse implements Streamable, ToXContent {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME, name);
- builder.field(Fields.FILTERED_TEXT, texts);
+ builder.array(Fields.FILTERED_TEXT, texts);
builder.endObject();
return builder;
}
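A minimal sketch of the array() call used above, assuming the String-named overload of XContentBuilder.array (field name and values illustrative):

    XContentBuilder b = XContentFactory.jsonBuilder();
    b.startObject();
    b.array("filtered_text", "foo", "bar"); // renders "filtered_text":["foo","bar"]
    b.endObject();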
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java
index f035bc0f4b..7d7e9d2dd2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java
@@ -45,9 +45,9 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
-import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
@@ -145,45 +145,46 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
}
}
final AnalysisRegistry analysisRegistry = indicesService.getAnalysis();
- return analyze(request, field, analyzer, indexService != null ? indexService.analysisService() : null, analysisRegistry, environment);
+ return analyze(request, field, analyzer, indexService != null ? indexService.getIndexAnalyzers() : null, analysisRegistry, environment);
} catch (IOException e) {
throw new ElasticsearchException("analysis failed", e);
}
}
- public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, AnalysisService analysisService, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
+ public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, IndexAnalyzers indexAnalyzers, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
boolean closeAnalyzer = false;
if (analyzer == null && request.analyzer() != null) {
- if (analysisService == null) {
+ if (indexAnalyzers == null) {
analyzer = analysisRegistry.getAnalyzer(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find global analyzer [" + request.analyzer() + "]");
}
} else {
- analyzer = analysisService.analyzer(request.analyzer());
+ analyzer = indexAnalyzers.get(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
}
}
} else if (request.tokenizer() != null) {
- TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, analysisService, analysisRegistry, environment);
+ final IndexSettings indexSettings = indexAnalyzers == null ? null : indexAnalyzers.getIndexSettings();
+ TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers, analysisRegistry, environment);
TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
- tokenFilterFactories = getTokenFilterFactories(request, analysisService, analysisRegistry, environment, tokenFilterFactories);
+ tokenFilterFactories = getTokenFilterFactories(request, indexSettings, analysisRegistry, environment, tokenFilterFactories);
CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
- charFilterFactories = getCharFilterFactories(request, analysisService, analysisRegistry, environment, charFilterFactories);
+ charFilterFactories = getCharFilterFactories(request, indexSettings, analysisRegistry, environment, charFilterFactories);
analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
closeAnalyzer = true;
} else if (analyzer == null) {
- if (analysisService == null) {
+ if (indexAnalyzers == null) {
analyzer = analysisRegistry.getAnalyzer("standard");
} else {
- analyzer = analysisService.defaultIndexAnalyzer();
+ analyzer = indexAnalyzers.getDefaultIndexAnalyzer();
}
}
if (analyzer == null) {
@@ -446,7 +447,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return extendedAttributes;
}
- private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
+ private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, CharFilterFactory[] charFilterFactories) throws IOException {
if (request.charFilters() != null && request.charFilters().size() > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().size()];
@@ -468,19 +469,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
charFilterFactories[i] = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter_[" + i + "]", settings);
} else {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory;
- if (analysisService == null) {
+ if (indexSettings == null) {
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilter.name);
} else {
- charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name, analysisService.getIndexSettings());
+ charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name, indexSettings);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
- charFilterFactories[i] = charFilterFactoryFactory.get(analysisService.getIndexSettings(), environment, charFilter.name,
- AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
+ charFilterFactories[i] = charFilterFactoryFactory.get(indexSettings, environment, charFilter.name,
+ AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_CHAR_FILTER + "." + charFilter.name));
}
}
@@ -492,7 +493,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return charFilterFactories;
}
- private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
+ private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, TokenFilterFactory[] tokenFilterFactories) throws IOException {
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()];
@@ -514,19 +515,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter_[" + i + "]", settings);
} else {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory;
- if (analysisService == null) {
+ if (indexSettings == null) {
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
} else {
- tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name, analysisService.getIndexSettings());
+ tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name, indexSettings);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilter.name + "]");
}
- tokenFilterFactories[i] = tokenFilterFactoryFactory.get(analysisService.getIndexSettings(), environment, tokenFilter.name,
- AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
+ tokenFilterFactories[i] = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name,
+ AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." + tokenFilter.name));
}
}
@@ -538,7 +539,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return tokenFilterFactories;
}
- private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, AnalysisService analysisService,
+ private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalyzers,
AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
TokenizerFactory tokenizerFactory;
final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
@@ -558,19 +559,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
} else {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory;
- if (analysisService == null) {
+ if (indexAnalyzers == null) {
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
} else {
- tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, analysisService.getIndexSettings());
+ tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, indexAnalyzers.getIndexSettings());
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
}
- tokenizerFactory = tokenizerFactoryFactory.get(analysisService.getIndexSettings(), environment, tokenizer.name,
- AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
+ tokenizerFactory = tokenizerFactoryFactory.get(indexAnalyzers.getIndexSettings(), environment, tokenizer.name,
+ AnalysisRegistry.getSettingsFromIndexSettings(indexAnalyzers.getIndexSettings(),
AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." + tokenizer.name));
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
index 06810c4dcd..d33f37defe 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.close;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@@ -108,7 +110,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
@Override
public void onFailure(Exception t) {
- logger.debug("failed to close indices [{}]", t, (Object)concreteIndices);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
index 7b3b2a0a2f..a2290a5e25 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
@@ -41,6 +41,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final TransportMessage originalMessage;
private final String cause;
private final String index;
+ private final String providedName;
private final boolean updateAllTypes;
private Index shrinkFrom;
@@ -59,11 +60,13 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
- public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
+ public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName,
+ boolean updateAllTypes) {
this.originalMessage = originalMessage;
this.cause = cause;
this.index = index;
this.updateAllTypes = updateAllTypes;
+ this.providedName = providedName;
}
public CreateIndexClusterStateUpdateRequest settings(Settings settings) {
@@ -151,6 +154,14 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return updateAllTypes;
}
+ /**
+ * The name that was provided by the user. This might contain a date math expression.
+ * @see IndexMetaData#SETTING_INDEX_PROVIDED_NAME
+ */
+ public String getProvidedName() {
+ return providedName;
+ }
+
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
index d3ce1975e8..354dcf2387 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
@@ -72,7 +72,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
}
final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index());
- final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.updateAllTypes())
+ final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index(), request.updateAllTypes())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.aliases(request.aliases()).customs(request.customs())
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
index 947936bddc..251eed8bdb 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.delete;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@@ -100,7 +102,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Override
public void onFailure(Exception t) {
- logger.debug("failed to delete indices [{}]", t, concreteIndices);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
listener.onFailure(t);
}
});
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java
index 7dc55c08fa..f91b69755c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java
@@ -40,7 +40,7 @@ import java.io.IOException;
public class FlushRequest extends BroadcastRequest<FlushRequest> {
private boolean force = false;
- private boolean waitIfOngoing = false;
+ private boolean waitIfOngoing = true;
/**
* Constructs a new flush request against one or more indices. If nothing is provided, all indices will
@@ -61,6 +61,7 @@ public class FlushRequest extends BroadcastRequest<FlushRequest> {
/**
* if set to <tt>true</tt> the flush will block
* if another flush operation is already running, until the flush can be performed.
+ * The default is <code>true</code>
*/
public FlushRequest waitIfOngoing(boolean waitIfOngoing) {
this.waitIfOngoing = waitIfOngoing;
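With waitIfOngoing now defaulting to true, a concurrent flush blocks instead of failing. A sketch of explicitly restoring the old non-blocking behavior (index name illustrative):

    FlushRequest flush = new FlushRequest("my-index").waitIfOngoing(false);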
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java
index 6887857925..0dd81075b8 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java
@@ -77,7 +77,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
return feature;
}
}
- throw new IllegalArgumentException("No feature for name [" + name + "]");
+ throw new IllegalArgumentException("No endpoint or operation is available at [" + name + "]");
}
public static Feature fromId(byte id) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
index a03472262e..d9ebf88fda 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.mapping.put;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -92,12 +94,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
@Override
public void onFailure(Exception t) {
- logger.debug("failed to put mappings on indices [{}], type [{}]", t, concreteIndices, request.type());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
listener.onFailure(t);
}
});
} catch (IndexNotFoundException ex) {
- logger.debug("failed to put mappings on indices [{}], type [{}]", ex, request.indices(), request.type());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
throw ex;
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
index d672d55b32..1128ebf987 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.open;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@@ -93,7 +95,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Override
public void onFailure(Exception t) {
- logger.debug("failed to open indices [{}]", t, (Object)concreteIndices);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
index 82c54f8a1b..aeb2fe78df 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
@@ -47,11 +47,15 @@ import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
+import java.util.Arrays;
+import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
+import static java.util.Collections.unmodifiableList;
+
/**
* Main class to swap the index pointed to by an alias, given some conditions
*/
@@ -102,6 +106,8 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
validate(metaData, rolloverRequest);
final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(rolloverRequest.getAlias());
final IndexMetaData indexMetaData = aliasOrIndex.getIndices().get(0);
+ final String sourceProvidedName = indexMetaData.getSettings().get(IndexMetaData.SETTING_INDEX_PROVIDED_NAME,
+ indexMetaData.getIndex().getName());
final String sourceIndexName = indexMetaData.getIndex().getName();
client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute(
new ActionListener<IndicesStatsResponse>() {
@@ -109,16 +115,18 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
public void onResponse(IndicesStatsResponse statsResponse) {
final Set<Condition.Result> conditionResults = evaluateConditions(rolloverRequest.getConditions(),
statsResponse.getTotal().getDocs(), metaData.index(sourceIndexName));
- final String rolloverIndexName = (rolloverRequest.getNewIndexName() != null)
+ final String unresolvedName = (rolloverRequest.getNewIndexName() != null)
? rolloverRequest.getNewIndexName()
- : generateRolloverIndexName(sourceIndexName);
+ : generateRolloverIndexName(sourceProvidedName, indexNameExpressionResolver);
+ final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName);
if (rolloverRequest.isDryRun()) {
listener.onResponse(
new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false));
return;
}
if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) {
- CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(rolloverIndexName, rolloverRequest);
+ CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(unresolvedName, rolloverIndexName,
+ rolloverRequest);
createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> {
// switch the alias to point to the newly created index
indexAliasesService.indicesAliases(
@@ -156,23 +164,27 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
static IndicesAliasesClusterStateUpdateRequest prepareRolloverAliasesUpdateRequest(String oldIndex, String newIndex,
RolloverRequest request) {
- final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest()
+ List<AliasAction> actions = unmodifiableList(Arrays.asList(
+ new AliasAction.Add(newIndex, request.getAlias(), null, null, null),
+ new AliasAction.Remove(oldIndex, request.getAlias())));
+ final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(actions)
.ackTimeout(request.ackTimeout())
.masterNodeTimeout(request.masterNodeTimeout());
- AliasAction[] actions = new AliasAction[2];
- actions[0] = new AliasAction(AliasAction.Type.ADD, newIndex, request.getAlias());
- actions[1] = new AliasAction(AliasAction.Type.REMOVE, oldIndex, request.getAlias());
- updateRequest.actions(actions);
return updateRequest;
}
- static String generateRolloverIndexName(String sourceIndexName) {
- if (INDEX_NAME_PATTERN.matcher(sourceIndexName).matches()) {
+ static String generateRolloverIndexName(String sourceIndexName, IndexNameExpressionResolver indexNameExpressionResolver) {
+ String resolvedName = indexNameExpressionResolver.resolveDateMathExpression(sourceIndexName);
+ final boolean isDateMath = sourceIndexName.equals(resolvedName) == false;
+ if (INDEX_NAME_PATTERN.matcher(resolvedName).matches()) {
int numberIndex = sourceIndexName.lastIndexOf("-");
assert numberIndex != -1 : "no separator '-' found";
- int counter = Integer.parseInt(sourceIndexName.substring(numberIndex + 1));
- return String.join("-", sourceIndexName.substring(0, numberIndex), String.format(Locale.ROOT, "%06d", ++counter));
+ int counter = Integer.parseInt(sourceIndexName.substring(numberIndex + 1, isDateMath ? sourceIndexName.length()-1 :
+ sourceIndexName.length()));
+ String newName = sourceIndexName.substring(0, numberIndex) + "-" + String.format(Locale.ROOT, "%06d", ++counter)
+ + (isDateMath ? ">" : "");
+ return newName;
} else {
throw new IllegalArgumentException("index name [" + sourceIndexName + "] does not match pattern '^.*-(\\d)+$'");
}
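Worked examples of the naming logic above (values illustrative): the counter after the last '-' is incremented and zero-padded to six digits, and the trailing '>' of a date-math name is preserved:

    // "logs-000001"      -> "logs-000002"
    // "<logs-{now/d}-1>" -> "<logs-{now/d}-000002>"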
@@ -200,14 +212,14 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
}
}
- static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final String targetIndexName,
+ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final String providedIndexName, final String targetIndexName,
final RolloverRequest rolloverRequest) {
final CreateIndexRequest createIndexRequest = rolloverRequest.getCreateIndexRequest();
createIndexRequest.cause("rollover_index");
createIndexRequest.index(targetIndexName);
return new CreateIndexClusterStateUpdateRequest(createIndexRequest,
- "rollover_index", targetIndexName, true)
+ "rollover_index", targetIndexName, providedIndexName, true)
.ackTimeout(createIndexRequest.timeout())
.masterNodeTimeout(createIndexRequest.masterNodeTimeout())
.settings(createIndexRequest.settings())
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
index 5655400465..f9ebff0663 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.settings.put;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -92,7 +94,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
@Override
public void onFailure(Exception t) {
- logger.debug("failed to update settings on indices [{}]", t, (Object)concreteIndices);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
index dbfcdaf163..e13578d66d 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.shards;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@@ -41,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.gateway.AsyncShardFetch;
@@ -94,14 +94,13 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
logger.trace("using cluster state version [{}] to determine shards", state.version());
// collect relevant shard ids of the requested indices for fetching store infos
for (String index : concreteIndices) {
- IndexMetaData indexMetaData = state.metaData().index(index);
IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
if (indexShardRoutingTables == null) {
continue;
}
for (IndexShardRoutingTable routing : indexShardRoutingTables) {
final int shardId = routing.shardId().id();
- ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing, indexMetaData);
+ ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing);
if (request.shardStatuses().contains(shardHealth.getStatus())) {
shardIdsToFetch.add(routing.shardId());
}
@@ -151,7 +150,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
private class InternalAsyncFetch extends AsyncShardFetch<NodeGatewayStartedShards> {
- InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
+ InternalAsyncFetch(Logger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
super(logger, type, shardId, action);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java
index 4667f1e982..6d27b03db6 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java
@@ -104,10 +104,10 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
}
// static for unittesting this method
- static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ShrinkRequest shrinkReqeust, final ClusterState state
+ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ShrinkRequest shrinkRequest, final ClusterState state
, final IntFunction<DocsStats> perShardDocStats, IndexNameExpressionResolver indexNameExpressionResolver) {
- final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkReqeust.getSourceIndex());
- final CreateIndexRequest targetIndex = shrinkReqeust.getShrinkIndexRequest();
+ final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex());
+ final CreateIndexRequest targetIndex = shrinkRequest.getShrinkIndexRequest();
final String targetIndexName = indexNameExpressionResolver.resolveDateMathExpression(targetIndex.index());
final IndexMetaData metaData = state.metaData().index(sourceIndex);
final Settings targetIndexSettings = Settings.builder().put(targetIndex.settings())
@@ -137,7 +137,7 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
targetIndex.settings(settingsBuilder);
return new CreateIndexClusterStateUpdateRequest(targetIndex,
- "shrink_index", targetIndexName, true)
+ "shrink_index", targetIndex.index(), targetIndexName, true)
// mappings are updated on the node when merging in the shards; this prevents race conditions since all mappings must be
// applied once we take the snapshot. If somebody switches the index back to read/write and adds docs, we would miss
// those mappings and everything would be corrupted and hard to debug
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
index 14fc6c05e5..ce90858f49 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.stats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -32,13 +32,13 @@ import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
-import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.WarmerStats;
@@ -47,9 +47,55 @@ import org.elasticsearch.search.suggest.completion.CompletionStats;
import java.io.IOException;
-/**
- */
-public class CommonStats implements Streamable, ToXContent {
+public class CommonStats implements Writeable, ToXContent {
+
+ @Nullable
+ public DocsStats docs;
+
+ @Nullable
+ public StoreStats store;
+
+ @Nullable
+ public IndexingStats indexing;
+
+ @Nullable
+ public GetStats get;
+
+ @Nullable
+ public SearchStats search;
+
+ @Nullable
+ public MergeStats merge;
+
+ @Nullable
+ public RefreshStats refresh;
+
+ @Nullable
+ public FlushStats flush;
+
+ @Nullable
+ public WarmerStats warmer;
+
+ @Nullable
+ public QueryCacheStats queryCache;
+
+ @Nullable
+ public FieldDataStats fieldData;
+
+ @Nullable
+ public CompletionStats completion;
+
+ @Nullable
+ public SegmentsStats segments;
+
+ @Nullable
+ public TranslogStats translog;
+
+ @Nullable
+ public RequestCacheStats requestCache;
+
+ @Nullable
+ public RecoveryStats recoveryStats;
public CommonStats() {
this(CommonStatsFlags.NONE);
@@ -117,11 +163,8 @@ public class CommonStats implements Streamable, ToXContent {
}
}
-
public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {
-
CommonStatsFlags.Flag[] setFlags = flags.getFlags();
-
for (CommonStatsFlags.Flag flag : setFlags) {
switch (flag) {
case Docs:
@@ -181,53 +224,135 @@ public class CommonStats implements Streamable, ToXContent {
}
}
- @Nullable
- public DocsStats docs;
-
- @Nullable
- public StoreStats store;
-
- @Nullable
- public IndexingStats indexing;
-
- @Nullable
- public GetStats get;
-
- @Nullable
- public SearchStats search;
-
- @Nullable
- public MergeStats merge;
-
- @Nullable
- public RefreshStats refresh;
-
- @Nullable
- public FlushStats flush;
-
- @Nullable
- public WarmerStats warmer;
-
- @Nullable
- public QueryCacheStats queryCache;
-
- @Nullable
- public FieldDataStats fieldData;
-
- @Nullable
- public CompletionStats completion;
-
- @Nullable
- public SegmentsStats segments;
-
- @Nullable
- public TranslogStats translog;
-
- @Nullable
- public RequestCacheStats requestCache;
+ public CommonStats(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ docs = DocsStats.readDocStats(in);
+ }
+ if (in.readBoolean()) {
+ store = StoreStats.readStoreStats(in);
+ }
+ if (in.readBoolean()) {
+ indexing = IndexingStats.readIndexingStats(in);
+ }
+ if (in.readBoolean()) {
+ get = GetStats.readGetStats(in);
+ }
+ if (in.readBoolean()) {
+ search = SearchStats.readSearchStats(in);
+ }
+ if (in.readBoolean()) {
+ merge = MergeStats.readMergeStats(in);
+ }
+ if (in.readBoolean()) {
+ refresh = RefreshStats.readRefreshStats(in);
+ }
+ if (in.readBoolean()) {
+ flush = FlushStats.readFlushStats(in);
+ }
+ if (in.readBoolean()) {
+ warmer = WarmerStats.readWarmerStats(in);
+ }
+ if (in.readBoolean()) {
+ queryCache = QueryCacheStats.readQueryCacheStats(in);
+ }
+ if (in.readBoolean()) {
+ fieldData = FieldDataStats.readFieldDataStats(in);
+ }
+ if (in.readBoolean()) {
+ completion = CompletionStats.readCompletionStats(in);
+ }
+ if (in.readBoolean()) {
+ segments = SegmentsStats.readSegmentsStats(in);
+ }
+ translog = in.readOptionalStreamable(TranslogStats::new);
+ requestCache = in.readOptionalStreamable(RequestCacheStats::new);
+ recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
+ }
- @Nullable
- public RecoveryStats recoveryStats;
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (docs == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ docs.writeTo(out);
+ }
+ if (store == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ store.writeTo(out);
+ }
+ if (indexing == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ indexing.writeTo(out);
+ }
+ if (get == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ get.writeTo(out);
+ }
+ if (search == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ search.writeTo(out);
+ }
+ if (merge == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ merge.writeTo(out);
+ }
+ if (refresh == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ refresh.writeTo(out);
+ }
+ if (flush == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ flush.writeTo(out);
+ }
+ if (warmer == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ warmer.writeTo(out);
+ }
+ if (queryCache == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ queryCache.writeTo(out);
+ }
+ if (fieldData == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ fieldData.writeTo(out);
+ }
+ if (completion == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ completion.writeTo(out);
+ }
+ if (segments == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ segments.writeTo(out);
+ }
+ out.writeOptionalStreamable(translog);
+ out.writeOptionalStreamable(requestCache);
+ out.writeOptionalStreamable(recoveryStats);
+ }
public void add(CommonStats stats) {
if (docs == null) {
@@ -441,12 +566,6 @@ public class CommonStats implements Streamable, ToXContent {
return recoveryStats;
}
- public static CommonStats readCommonStats(StreamInput in) throws IOException {
- CommonStats stats = new CommonStats();
- stats.readFrom(in);
- return stats;
- }
-
/**
* Utility method which computes total memory by adding
* FieldData, PercolatorCache, Segments (memory, index writer, version map)
@@ -468,137 +587,6 @@ public class CommonStats implements Streamable, ToXContent {
return new ByteSizeValue(size);
}
- @Override
- public void readFrom(StreamInput in) throws IOException {
- if (in.readBoolean()) {
- docs = DocsStats.readDocStats(in);
- }
- if (in.readBoolean()) {
- store = StoreStats.readStoreStats(in);
- }
- if (in.readBoolean()) {
- indexing = IndexingStats.readIndexingStats(in);
- }
- if (in.readBoolean()) {
- get = GetStats.readGetStats(in);
- }
- if (in.readBoolean()) {
- search = SearchStats.readSearchStats(in);
- }
- if (in.readBoolean()) {
- merge = MergeStats.readMergeStats(in);
- }
- if (in.readBoolean()) {
- refresh = RefreshStats.readRefreshStats(in);
- }
- if (in.readBoolean()) {
- flush = FlushStats.readFlushStats(in);
- }
- if (in.readBoolean()) {
- warmer = WarmerStats.readWarmerStats(in);
- }
- if (in.readBoolean()) {
- queryCache = QueryCacheStats.readQueryCacheStats(in);
- }
- if (in.readBoolean()) {
- fieldData = FieldDataStats.readFieldDataStats(in);
- }
- if (in.readBoolean()) {
- completion = CompletionStats.readCompletionStats(in);
- }
- if (in.readBoolean()) {
- segments = SegmentsStats.readSegmentsStats(in);
- }
- translog = in.readOptionalStreamable(TranslogStats::new);
- requestCache = in.readOptionalStreamable(RequestCacheStats::new);
- recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- if (docs == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- docs.writeTo(out);
- }
- if (store == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- store.writeTo(out);
- }
- if (indexing == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- indexing.writeTo(out);
- }
- if (get == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- get.writeTo(out);
- }
- if (search == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- search.writeTo(out);
- }
- if (merge == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- merge.writeTo(out);
- }
- if (refresh == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- refresh.writeTo(out);
- }
- if (flush == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- flush.writeTo(out);
- }
- if (warmer == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- warmer.writeTo(out);
- }
- if (queryCache == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- queryCache.writeTo(out);
- }
- if (fieldData == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- fieldData.writeTo(out);
- }
- if (completion == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- completion.writeTo(out);
- }
- if (segments == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- segments.writeTo(out);
- }
- out.writeOptionalStreamable(translog);
- out.writeOptionalStreamable(requestCache);
- out.writeOptionalStreamable(recoveryStats);
- }
-
// note, requires a wrapping object
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
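Note: the CommonStats changes above follow the commit-wide Streamable-to-Writeable migration: deserialization moves from a mutable readFrom() into a StreamInput constructor, and each optional sub-object is framed by a boolean presence flag. A minimal sketch of the pattern, assuming the Elasticsearch core stream classes on the classpath; the docCount field is invented and stands in for the real nullable stats objects:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    class StatsSketch implements Writeable {

        Long docCount; // null means "stat not requested"

        StatsSketch(StreamInput in) throws IOException {
            if (in.readBoolean()) { // presence flag written by writeTo below
                docCount = in.readVLong();
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            if (docCount == null) {
                out.writeBoolean(false);
            } else {
                out.writeBoolean(true);
                out.writeVLong(docCount);
            }
        }
    }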
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
index a9af50b249..7d6e7c124c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -19,17 +19,15 @@
package org.elasticsearch.action.admin.indices.stats;
-import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
+import java.util.Collections;
import java.util.EnumSet;
-/**
- */
-public class CommonStatsFlags implements Streamable, Cloneable {
+public class CommonStatsFlags implements Writeable, Cloneable {
public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
public static final CommonStatsFlags NONE = new CommonStatsFlags().clear();
@@ -41,19 +39,45 @@ public class CommonStatsFlags implements Streamable, Cloneable {
private String[] completionDataFields = null;
private boolean includeSegmentFileSizes = false;
-
/**
* @param flags flags to set. If no flags are supplied, default flags will be set.
*/
public CommonStatsFlags(Flag... flags) {
if (flags.length > 0) {
clear();
- for (Flag f : flags) {
- this.flags.add(f);
+ Collections.addAll(this.flags, flags);
+ }
+ }
+
+ public CommonStatsFlags(StreamInput in) throws IOException {
+ final long longFlags = in.readLong();
+ flags.clear();
+ for (Flag flag : Flag.values()) {
+ if ((longFlags & (1 << flag.ordinal())) != 0) {
+ flags.add(flag);
}
}
+ types = in.readStringArray();
+ groups = in.readStringArray();
+ fieldDataFields = in.readStringArray();
+ completionDataFields = in.readStringArray();
+ includeSegmentFileSizes = in.readBoolean();
}
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ long longFlags = 0;
+ for (Flag flag : flags) {
+ longFlags |= (1 << flag.ordinal());
+ }
+ out.writeLong(longFlags);
+
+ out.writeStringArrayNullable(types);
+ out.writeStringArrayNullable(groups);
+ out.writeStringArrayNullable(fieldDataFields);
+ out.writeStringArrayNullable(completionDataFields);
+ out.writeBoolean(includeSegmentFileSizes);
+ }
/**
* Sets all flags to return all stats.
@@ -162,7 +186,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
flags.add(flag);
}
-
public CommonStatsFlags set(Flag flag, boolean add) {
if (add) {
set(flag);
@@ -172,49 +195,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
return this;
}
- public static CommonStatsFlags readCommonStatsFlags(StreamInput in) throws IOException {
- CommonStatsFlags flags = new CommonStatsFlags();
- flags.readFrom(in);
- return flags;
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- long longFlags = 0;
- for (Flag flag : flags) {
- longFlags |= (1 << flag.ordinal());
- }
- out.writeLong(longFlags);
-
- out.writeStringArrayNullable(types);
- out.writeStringArrayNullable(groups);
- out.writeStringArrayNullable(fieldDataFields);
- out.writeStringArrayNullable(completionDataFields);
- if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
- out.writeBoolean(includeSegmentFileSizes);
- }
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- final long longFlags = in.readLong();
- flags.clear();
- for (Flag flag : Flag.values()) {
- if ((longFlags & (1 << flag.ordinal())) != 0) {
- flags.add(flag);
- }
- }
- types = in.readStringArray();
- groups = in.readStringArray();
- fieldDataFields = in.readStringArray();
- completionDataFields = in.readStringArray();
- if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
- includeSegmentFileSizes = in.readBoolean();
- } else {
- includeSegmentFileSizes = false;
- }
- }
-
@Override
public CommonStatsFlags clone() {
try {
@@ -226,7 +206,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
}
}
- public static enum Flag {
+ public enum Flag {
// Do not change the order of these flags we use
// the ordinal for encoding! Only append to the end!
Store("store"),
@@ -247,7 +227,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
RequestCache("request_cache"),
Recovery("recovery");
-
private final String restName;
Flag(String restName) {
@@ -257,6 +236,5 @@ public class CommonStatsFlags implements Streamable, Cloneable {
public String getRestName() {
return restName;
}
-
}
}
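Note: the flags travel over the wire as bits of a long keyed by enum ordinal, which is why the Flag comment forbids reordering constants and only allows appending. A self-contained sketch of that encoding (names invented; the production code shifts an int, which is safe with well under 32 flags, while 1L stays safe beyond that):

    import java.util.EnumSet;

    enum SketchFlag { STORE, INDEXING, GET }

    class FlagCodec {

        static long encode(EnumSet<SketchFlag> flags) {
            long bits = 0;
            for (SketchFlag f : flags) {
                bits |= 1L << f.ordinal(); // one bit per ordinal
            }
            return bits;
        }

        static EnumSet<SketchFlag> decode(long bits) {
            EnumSet<SketchFlag> flags = EnumSet.noneOf(SketchFlag.class);
            for (SketchFlag f : SketchFlag.values()) {
                if ((bits & (1L << f.ordinal())) != 0) {
                    flags.add(f);
                }
            }
            return flags;
        }
    }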
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java
index 2308c9bae6..e4357f7ba1 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java
@@ -274,6 +274,6 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
- flags = CommonStatsFlags.readCommonStatsFlags(in);
+ flags = new CommonStatsFlags(in);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
index 6640defc1c..5bc6ce8106 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
@@ -90,7 +90,7 @@ public class ShardStats implements Streamable, ToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
shardRouting = new ShardRouting(in);
- commonStats = CommonStats.readCommonStats(in);
+ commonStats = new CommonStats(in);
commitStats = CommitStats.readOptionalCommitStatsFrom(in);
statePath = in.readString();
dataPath = in.readString();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
index 33addcb844..bb18b57fa9 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.admin.indices.template.delete;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -73,7 +75,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
@Override
public void onFailure(Exception e) {
- logger.debug("failed to delete templates [{}]", e, request.name());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
listener.onFailure(e);
}
});
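Note: this file shows the logging migration repeated throughout the commit: the removed ESLogger overloads took (message, exception, args), while Log4j 2 takes a lazily evaluated message Supplier plus the Throwable, so the ParameterizedMessage is only formatted when the level is enabled. A minimal sketch, assuming only the log4j-api dependency; class name and message text are illustrative:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LazyLoggingSketch {

        private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

        void onFailure(Exception e, String templateName) {
            // the cast picks the (Supplier<?>, Throwable) overload over (Object, Throwable)
            logger.debug((Supplier<?>) () ->
                new ParameterizedMessage("failed to delete templates [{}]", templateName), e);
        }
    }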
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
index 7693419df4..a519bd8bf5 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
@@ -27,9 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import static java.util.Collections.singletonMap;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
index 76c665c7b8..5be044ea58 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
@@ -74,6 +74,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
private Map<String, IndexMetaData.Custom> customs = new HashMap<>();
+ private Integer version;
+
public PutIndexTemplateRequest() {
}
@@ -129,6 +131,15 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return this.order;
}
+ public PutIndexTemplateRequest version(Integer version) {
+ this.version = version;
+ return this;
+ }
+
+ public Integer version() {
+ return this.version;
+ }
+
/**
* Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
* exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
@@ -278,16 +289,23 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
template(entry.getValue().toString());
} else if (name.equals("order")) {
order(XContentMapValues.nodeIntegerValue(entry.getValue(), order()));
+ } else if ("version".equals(name)) {
+ if ((entry.getValue() instanceof Integer) == false) {
+ throw new IllegalArgumentException("Malformed [version] value, should be an integer");
+ }
+ version((Integer)entry.getValue());
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
- throw new IllegalArgumentException("Malformed settings section, should include an inner object");
+ throw new IllegalArgumentException("Malformed [settings] section, should include an inner object");
}
settings((Map<String, Object>) entry.getValue());
} else if (name.equals("mappings")) {
Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
if (!(entry1.getValue() instanceof Map)) {
- throw new IllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping");
+ throw new IllegalArgumentException(
+ "Malformed [mappings] section for type [" + entry1.getKey() +
+ "], should include an inner object describing the mapping");
}
mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
}
@@ -449,6 +467,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
for (int i = 0; i < aliasesSize; i++) {
aliases.add(Alias.read(in));
}
+ version = in.readOptionalVInt();
}
@Override
@@ -474,5 +493,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
for (Alias alias : aliases) {
alias.writeTo(out);
}
+ out.writeOptionalVInt(version);
}
}
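Note: the new template version rides over the wire with the optional-vint helpers used above: writeOptionalVInt writes a presence flag followed by the value, and readOptionalVInt returns null when the value was absent, so older senders that never set a version stay representable. A minimal sketch, assuming Elasticsearch's stream classes; the class is invented:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    class VersionedSketch {

        private Integer version; // null means "no version supplied"

        void readFrom(StreamInput in) throws IOException {
            version = in.readOptionalVInt();
        }

        void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalVInt(version); // handles the null case itself
        }
    }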
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
index 5207cacf6b..8acc2b3f61 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
@@ -30,7 +30,8 @@ import java.util.Map;
/**
*
*/
-public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {
+public class PutIndexTemplateRequestBuilder
+ extends MasterNodeOperationRequestBuilder<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {
public PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action) {
super(client, action, new PutIndexTemplateRequest());
@@ -57,6 +58,14 @@ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBu
}
/**
+ * Sets the optional version of this template.
+ */
+ public PutIndexTemplateRequestBuilder setVersion(Integer version) {
+ request.version(version);
+ return this;
+ }
+
+ /**
* Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
* exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
*/
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
index 0d14c4d24d..77746b395e 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.admin.indices.template.put;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -84,7 +86,8 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
.aliases(request.aliases())
.customs(request.customs())
.create(request.create())
- .masterTimeout(request.masterNodeTimeout()),
+ .masterTimeout(request.masterNodeTimeout())
+ .version(request.version()),
new MetaDataIndexTemplateService.PutListener() {
@Override
@@ -94,7 +97,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
@Override
public void onFailure(Exception e) {
- logger.debug("failed to put template [{}]", e, request.name());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
listener.onFailure(e);
}
});
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
index 3b77892086..f467c6ae74 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.indices.upgrade.post;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -79,7 +81,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<Up
@Override
public void onFailure(Exception t) {
- logger.debug("failed to upgrade minimum compatibility version settings on indices [{}]", t, request.versions().keySet());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
listener.onFailure(t);
}
});
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
index d1405e92e1..718d3b25e6 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
@@ -38,17 +38,11 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardException;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
-import org.elasticsearch.search.fetch.FetchPhase;
-import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.tasks.Task;
@@ -67,25 +61,15 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
*/
public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {
- private final IndicesService indicesService;
-
- private final ScriptService scriptService;
-
- private final BigArrays bigArrays;
-
- private final FetchPhase fetchPhase;
+ private final SearchService searchService;
@Inject
public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
- TransportService transportService, IndicesService indicesService, ScriptService scriptService,
- BigArrays bigArrays, ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
+ TransportService transportService, SearchService searchService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
- this.indicesService = indicesService;
- this.scriptService = scriptService;
- this.bigArrays = bigArrays;
- this.fetchPhase = fetchPhase;
+ this.searchService = searchService;
}
@Override
@@ -161,29 +145,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
@Override
protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
- IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
- IndexShard indexShard = indexService.getShard(request.shardId().id());
boolean valid;
String explanation = null;
String error = null;
- Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");
-
- DefaultSearchContext searchContext = new DefaultSearchContext(0,
- new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
- indexService, indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(),
- parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
+ ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(),
+ request.nowInMillis(), request.filteringAliases());
+ SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
SearchContext.setCurrent(searchContext);
try {
- searchContext.parsedQuery(searchContext.getQueryShardContext().toQuery(request.query()));
- searchContext.preProcess();
-
+ ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query());
+ searchContext.parsedQuery(parsedQuery);
+ searchContext.preProcess(request.rewrite());
valid = true;
- if (request.rewrite()) {
- explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
- } else if (request.explain()) {
- explanation = searchContext.filteredQuery().query().toString();
- }
+ explanation = explain(searchContext, request.rewrite());
} catch (QueryShardException|ParsingException e) {
valid = false;
error = e.getDetailedMessage();
@@ -191,19 +166,18 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
valid = false;
error = e.getMessage();
} finally {
- searchContext.close();
- SearchContext.removeCurrent();
+ Releasables.close(searchContext, () -> SearchContext.removeCurrent());
}
return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
}
- private String getRewrittenQuery(IndexSearcher searcher, Query query) throws IOException {
- Query queryRewrite = searcher.rewrite(query);
- if (queryRewrite instanceof MatchNoDocsQuery) {
- return query.toString();
+ private String explain(SearchContext context, boolean rewritten) throws IOException {
+ Query query = context.query();
+ if (rewritten && query instanceof MatchNoDocsQuery) {
+ return context.parsedQuery().query().toString();
} else {
- return queryRewrite.toString();
+ return query.toString();
}
}
}
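Note: the finally block above replaces two separate cleanup calls with one Releasables.close, which takes a varargs list of Releasable instances and closes them in order; because Releasable is a functional interface, the thread-local SearchContext removal can ride along as a lambda. A minimal sketch of the idiom, assuming the Elasticsearch core classes; names are illustrative:

    import org.elasticsearch.common.lease.Releasable;
    import org.elasticsearch.common.lease.Releasables;

    class CleanupSketch {

        void runWith(Releasable searchContext, Runnable removeThreadLocal) {
            try {
                // ... work against the acquired context ...
            } finally {
                Releasables.close(searchContext, () -> removeThreadLocal.run());
            }
        }
    }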
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
index 4881a9444b..f32bfaa775 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
@@ -190,7 +190,7 @@ public class BulkProcessor implements Closeable {
BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
this.bulkActions = bulkActions;
- this.bulkSize = bulkSize.bytes();
+ this.bulkSize = bulkSize.getBytes();
this.bulkRequest = new BulkRequest();
this.bulkRequestHandler = (concurrentRequests == 0) ? BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);
@@ -293,7 +293,7 @@ public class BulkProcessor implements Closeable {
}
public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
- bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true);
+ bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true);
executeIfNeeded();
return this;
}
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
index 538dfc4c3a..f583a29099 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
@@ -36,12 +36,15 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.ArrayList;
@@ -59,6 +62,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* @see org.elasticsearch.client.Client#bulk(BulkRequest)
*/
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
+ private static final DeprecationLogger DEPRECATION_LOGGER =
+ new DeprecationLogger(Loggers.getLogger(BulkRequest.class));
private static final int REQUEST_OVERHEAD = 50;
@@ -254,17 +259,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     * Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
- return add(data, defaultIndex, defaultType, null, null, null, null, true);
+ return add(data, defaultIndex, defaultType, null, null, null, null, null, true);
}
/**
     * Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception {
- return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex);
+ return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex);
}
- public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
+ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
XContent xContent = XContentFactory.xContent(data);
int line = 0;
int from = 0;
@@ -298,6 +303,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
String id = null;
String routing = defaultRouting;
String parent = null;
+ FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
String[] fields = defaultFields;
String timestamp = null;
TimeValue ttl = null;
@@ -350,16 +356,21 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
pipeline = parser.text();
} else if ("fields".equals(currentFieldName)) {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
+ } else if ("_source".equals(currentFieldName)) {
+ fetchSourceContext = FetchSourceContext.parse(parser);
} else {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
+ DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
List<Object> values = parser.list();
fields = values.toArray(new String[values.size()]);
} else {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
}
+ } else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
+ fetchSourceContext = FetchSourceContext.parse(parser);
} else if (token != XContentParser.Token.VALUE_NULL) {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
}
@@ -399,7 +410,10 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
.version(version).versionType(versionType)
.routing(routing)
.parent(parent)
- .source(data.slice(from, nextMarker - from));
+ .fromXContent(data.slice(from, nextMarker - from));
+ if (fetchSourceContext != null) {
+ updateRequest.fetchSource(fetchSourceContext);
+ }
if (fields != null) {
updateRequest.fields(fields);
}
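Note: with this parser change, an update action in a bulk body may carry _source directly in its metadata line, either as a simple value or as an object, while the old fields array still parses but logs a deprecation warning. A hedged sketch feeding such a payload through the three-argument add() shown above; index, type, and id are invented:

    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.common.bytes.BytesArray;

    class BulkSourceSketch {

        static BulkRequest build() throws Exception {
            // "_source": true in the action line asks for the updated source back
            String body =
                "{ \"update\": { \"_index\": \"test\", \"_type\": \"doc\", \"_id\": \"1\", \"_source\": true } }\n" +
                "{ \"doc\": { \"field\": \"value\" } }\n";
            BulkRequest request = new BulkRequest();
            request.add(new BytesArray(body), null, null); // no default index/type
            return request;
        }
    }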
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
index a829e4b029..6ad566ca50 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
@@ -18,9 +18,11 @@
*/
package org.elasticsearch.action.bulk;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@@ -31,7 +33,7 @@ import java.util.concurrent.TimeUnit;
* Abstracts the low-level details of bulk request handling
*/
abstract class BulkRequestHandler {
- protected final ESLogger logger;
+ protected final Logger logger;
protected final Client client;
protected BulkRequestHandler(Client client) {
@@ -76,12 +78,12 @@ abstract class BulkRequestHandler {
listener.afterBulk(executionId, bulkRequest, bulkResponse);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.info("Bulk request {} has been cancelled.", e, executionId);
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
} catch (Exception e) {
- logger.warn("Failed to execute bulk request {}.", e, executionId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
@@ -142,10 +144,10 @@ abstract class BulkRequestHandler {
bulkRequestSetupSuccessful = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.info("Bulk request {} has been cancelled.", e, executionId);
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} catch (Exception e) {
- logger.warn("Failed to execute bulk request {}.", e, executionId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} finally {
if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
index 2c16bcb5e9..ffc2407b8a 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -20,6 +20,7 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
@@ -101,4 +102,15 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
}
return b.toString();
}
+
+ @Override
+ public void onRetry() {
+ for (BulkItemRequest item : items) {
+ if (item.request() instanceof ReplicationRequest) {
+            // all replication requests need to be notified here as well, to e.g. make sure that internal optimizations are
+            // disabled; see IndexRequest#canHaveDuplicates()
+ ((ReplicationRequest) item.request()).onRetry();
+ }
+ }
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
index 95778785ab..375796ae80 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
@@ -18,12 +18,12 @@
*/
package org.elasticsearch.action.bulk;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
@@ -89,7 +89,7 @@ public class Retry {
}
static class AbstractRetryHandler implements ActionListener<BulkResponse> {
- private final ESLogger logger;
+ private final Logger logger;
private final Client client;
private final ActionListener<BulkResponse> listener;
private final Iterator<TimeValue> backoff;
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 0a9b45581e..098092ef1e 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.bulk;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.DocWriteResponse;
@@ -156,11 +158,11 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
BulkItemRequest item = request.items()[requestIndex];
DocumentRequest<?> documentRequest = item.request();
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
- logger.trace("{} failed to execute bulk item ({}) {}", e, request.shardId(),
- documentRequest.opType().getLowercase(), request);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
+ request.shardId(), documentRequest.opType().getLowercase(), request), e);
} else {
- logger.debug("{} failed to execute bulk item ({}) {}", e, request.shardId(),
- documentRequest.opType().getLowercase(), request);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
+ request.shardId(), documentRequest.opType().getLowercase(), request), e);
}
            // if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
diff --git a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java
index 851d9e6573..fef1b307e9 100644
--- a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java
@@ -40,7 +40,7 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
private String routing;
private String preference;
private QueryBuilder query;
- private String[] fields;
+ private String[] storedFields;
private FetchSourceContext fetchSourceContext;
private String[] filteringAlias = Strings.EMPTY_ARRAY;
@@ -122,12 +122,12 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
}
- public String[] fields() {
- return fields;
+ public String[] storedFields() {
+ return storedFields;
}
- public ExplainRequest fields(String[] fields) {
- this.fields = fields;
+ public ExplainRequest storedFields(String[] fields) {
+ this.storedFields = fields;
return this;
}
@@ -167,8 +167,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
preference = in.readOptionalString();
query = in.readNamedWriteable(QueryBuilder.class);
filteringAlias = in.readStringArray();
- fields = in.readOptionalStringArray();
- fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
+ storedFields = in.readOptionalStringArray();
+ fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
nowInMillis = in.readVLong();
}
@@ -181,8 +181,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
out.writeOptionalString(preference);
out.writeNamedWriteable(query);
out.writeStringArray(filteringAlias);
- out.writeOptionalStringArray(fields);
- out.writeOptionalStreamable(fetchSourceContext);
+ out.writeOptionalStringArray(storedFields);
+ out.writeOptionalWriteable(fetchSourceContext);
out.writeVLong(nowInMillis);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java
index c201315cbd..cf7b482181 100644
--- a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java
@@ -88,10 +88,10 @@ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder<Ex
}
/**
- * Explicitly specify the fields that will be returned for the explained document. By default, nothing is returned.
+ * Explicitly specify the stored fields that will be returned for the explained document. By default, nothing is returned.
*/
- public ExplainRequestBuilder setFields(String... fields) {
- request.fields(fields);
+ public ExplainRequestBuilder setStoredFields(String... fields) {
+ request.storedFields(fields);
return this;
}
diff --git a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java
index 95177853d4..7b6421906e 100644
--- a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java
+++ b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java
@@ -31,20 +31,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
-import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
-import org.elasticsearch.search.fetch.FetchPhase;
-import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.search.rescore.RescoreSearchContext;
@@ -60,26 +54,15 @@ import java.io.IOException;
// TODO: AggregatedDfs. Currently the idf can be different than when executing a normal search with explain.
public class TransportExplainAction extends TransportSingleShardAction<ExplainRequest, ExplainResponse> {
- private final IndicesService indicesService;
-
- private final ScriptService scriptService;
-
-
- private final BigArrays bigArrays;
-
- private final FetchPhase fetchPhase;
+ private final SearchService searchService;
@Inject
public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
- TransportService transportService, IndicesService indicesService, ScriptService scriptService,
- BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
- FetchPhase fetchPhase) {
+ TransportService transportService, SearchService searchService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
ExplainRequest::new, ThreadPool.Names.GET);
- this.indicesService = indicesService;
- this.scriptService = scriptService;
- this.bigArrays = bigArrays;
- this.fetchPhase = fetchPhase;
+ this.searchService = searchService;
}
@Override
@@ -104,34 +87,30 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
@Override
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
- IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
- IndexShard indexShard = indexService.getShard(shardId.id());
+ ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
+ new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
+ SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
- Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
- if (!result.exists()) {
- return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
- }
-
- SearchContext context = new DefaultSearchContext(0,
- new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
- result.searcher(), indexService, indexShard, scriptService, bigArrays,
- threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
SearchContext.setCurrent(context);
-
+ Engine.GetResult result = null;
try {
+ result = context.indexShard().get(new Engine.Get(false, uidTerm));
+ if (!result.exists()) {
+ return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
+ }
context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
- context.preProcess();
+ context.preProcess(true);
int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
for (RescoreSearchContext ctx : context.rescore()) {
Rescorer rescorer = ctx.rescorer();
explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
}
- if (request.fields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
+ if (request.storedFields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
// Advantage is that we're not opening a second searcher to retrieve the _source. Also
// because we are working in the same searcher in engineGetResult we can be sure that a
// doc isn't deleted between the initial get and this call.
- GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
+ GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext());
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
} else {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
@@ -139,8 +118,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
} catch (IOException e) {
throw new ElasticsearchException("Could not explain", e);
} finally {
- context.close();
- SearchContext.removeCurrent();
+ Releasables.close(result, context, () -> SearchContext.removeCurrent());
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java
index 4a4f106b08..1b2f1dc5ed 100644
--- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java
+++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java
@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.net.InetAddress;
+import java.util.Objects;
public abstract class FieldStats<T> implements Writeable, ToXContent {
private final byte type;
@@ -46,13 +47,11 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
protected T minValue;
protected T maxValue;
- FieldStats(byte type, long maxDoc, boolean isSearchable, boolean isAggregatable) {
- this(type, maxDoc, 0, 0, 0, isSearchable, isAggregatable, null, null);
- }
-
FieldStats(byte type,
long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
+ Objects.requireNonNull(minValue, "minValue must not be null");
+ Objects.requireNonNull(maxValue, "maxValue must not be null");
this.type = type;
this.maxDoc = maxDoc;
this.docCount = docCount;
@@ -220,14 +219,10 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
private void updateMinMax(T min, T max) {
- if (minValue == null) {
- minValue = min;
- } else if (min != null && compare(minValue, min) > 0) {
+ if (compare(minValue, min) > 0) {
minValue = min;
}
- if (maxValue == null) {
- maxValue = max;
- } else if (max != null && compare(maxValue, max) < 0) {
+ if (compare(maxValue, max) < 0) {
maxValue = max;
}
}
@@ -266,11 +261,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
out.writeLong(sumTotalTermFreq);
out.writeBoolean(isSearchable);
out.writeBoolean(isAggregatable);
- boolean hasMinMax = minValue != null;
- out.writeBoolean(hasMinMax);
- if (hasMinMax) {
- writeMinMax(out);
- }
+ writeMinMax(out);
}
protected abstract void writeMinMax(StreamOutput out) throws IOException;
@@ -280,9 +271,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
* otherwise <code>false</code> is returned
*/
public boolean match(IndexConstraint constraint) {
- if (minValue == null) {
- return false;
- }
int cmp;
T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
if (constraint.getProperty() == IndexConstraint.Property.MIN) {
@@ -307,6 +295,31 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ FieldStats<?> that = (FieldStats<?>) o;
+
+ if (type != that.type) return false;
+ if (maxDoc != that.maxDoc) return false;
+ if (docCount != that.docCount) return false;
+ if (sumDocFreq != that.sumDocFreq) return false;
+ if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
+ if (isSearchable != that.isSearchable) return false;
+ if (isAggregatable != that.isAggregatable) return false;
+ if (!minValue.equals(that.minValue)) return false;
+ return maxValue.equals(that.maxValue);
+
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
+ minValue, maxValue);
+ }
+
public static class Long extends FieldStats<java.lang.Long> {
public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
@@ -315,17 +328,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
isSearchable, isAggregatable, minValue, maxValue);
}
- public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
- boolean isSearchable, boolean isAggregatable) {
- super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable, null, null);
- }
-
- public Long(long maxDoc,
- boolean isSearchable, boolean isAggregatable) {
- super((byte) 0, maxDoc, isSearchable, isAggregatable);
- }
-
@Override
public int compare(java.lang.Long o1, java.lang.Long o2) {
return o1.compareTo(o2);
@@ -344,12 +346,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
- return minValue != null ? java.lang.Long.toString(minValue) : null;
+ return java.lang.Long.toString(minValue);
}
@Override
public String getMaxValueAsString() {
- return maxValue != null ? java.lang.Long.toString(maxValue) : null;
+ return java.lang.Long.toString(maxValue);
}
}
@@ -361,15 +363,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
minValue, maxValue);
}
- public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
- boolean isSearchable, boolean isAggregatable) {
- super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, null, null);
- }
-
- public Double(long maxDoc, boolean isSearchable, boolean isAggregatable) {
- super((byte) 1, maxDoc, isSearchable, isAggregatable);
- }
-
@Override
public int compare(java.lang.Double o1, java.lang.Double o2) {
return o1.compareTo(o2);
@@ -391,12 +384,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
- return minValue != null ? java.lang.Double.toString(minValue) : null;
+ return java.lang.Double.toString(minValue);
}
@Override
public String getMaxValueAsString() {
- return maxValue != null ? java.lang.Double.toString(maxValue) : null;
+ return java.lang.Double.toString(maxValue);
}
}
@@ -412,20 +405,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
this.formatter = formatter;
}
- public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
- boolean isSearchable, boolean isAggregatable,
- FormatDateTimeFormatter formatter) {
- super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
- null, null);
- this.formatter = formatter;
- }
-
- public Date(long maxDoc, boolean isSearchable, boolean isAggregatable,
- FormatDateTimeFormatter formatter) {
- super((byte) 2, maxDoc, isSearchable, isAggregatable);
- this.formatter = formatter;
- }
-
@Override
public int compare(java.lang.Long o1, java.lang.Long o2) {
return o1.compareTo(o2);
@@ -449,12 +428,29 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
- return minValue != null ? formatter.printer().print(minValue) : null;
+ return formatter.printer().print(minValue);
}
@Override
public String getMaxValueAsString() {
- return maxValue != null ? formatter.printer().print(maxValue) : null;
+ return formatter.printer().print(maxValue);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ if (!super.equals(o)) return false;
+
+ Date that = (Date) o;
+ return Objects.equals(formatter.format(), that.formatter.format());
+ }
+
+ @Override
+ public int hashCode() {
+ int result = super.hashCode();
+ result = 31 * result + formatter.format().hashCode();
+ return result;
}
}
@@ -467,10 +463,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
minValue, maxValue);
}
- public Text(long maxDoc, boolean isSearchable, boolean isAggregatable) {
- super((byte) 3, maxDoc, isSearchable, isAggregatable);
- }
-
@Override
public int compare(BytesRef o1, BytesRef o2) {
return o1.compareTo(o2);
@@ -492,12 +484,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
- return minValue != null ? minValue.utf8ToString() : null;
+ return minValue.utf8ToString();
}
@Override
public String getMaxValueAsString() {
- return maxValue != null ? maxValue.utf8ToString() : null;
+ return maxValue.utf8ToString();
}
@Override
@@ -516,10 +508,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
minValue, maxValue);
}
- public Ip(long maxDoc, boolean isSearchable, boolean isAggregatable) {
- super((byte) 4, maxDoc, isSearchable, isAggregatable);
- }
-
@Override
public int compare(InetAddress o1, InetAddress o2) {
byte[] b1 = InetAddressPoint.encode(o1);
@@ -544,12 +532,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public String getMinValueAsString() {
- return minValue != null ? NetworkAddress.format(minValue) : null;
+ return NetworkAddress.format(minValue);
}
@Override
public String getMaxValueAsString() {
- return maxValue != null ? NetworkAddress.format(maxValue) : null;
+ return NetworkAddress.format(maxValue);
}
}
@@ -561,53 +549,35 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
long sumTotalTermFreq = in.readLong();
boolean isSearchable = in.readBoolean();
boolean isAggregatable = in.readBoolean();
- boolean hasMinMax = in.readBoolean();
switch (type) {
case 0:
- if (hasMinMax) {
- return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable, in.readLong(), in.readLong());
- }
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable);
+ isSearchable, isAggregatable, in.readLong(), in.readLong());
case 1:
- if (hasMinMax) {
- return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable, in.readDouble(), in.readDouble());
- }
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable);
+ isSearchable, isAggregatable, in.readDouble(), in.readDouble());
case 2:
FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
- if (hasMinMax) {
- return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
- }
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable, formatter);
+ isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
+
case 3:
- if (hasMinMax) {
- return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
- }
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
- isSearchable, isAggregatable, null, null);
+ isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
case 4:
- InetAddress min = null;
- InetAddress max = null;
- if (hasMinMax) {
- int l1 = in.readByte();
- byte[] b1 = new byte[l1];
- int l2 = in.readByte();
- byte[] b2 = new byte[l2];
- min = InetAddressPoint.decode(b1);
- max = InetAddressPoint.decode(b2);
- }
+ int l1 = in.readByte();
+ byte[] b1 = new byte[l1];
+ in.readBytes(b1, 0, l1);
+ int l2 = in.readByte();
+ byte[] b2 = new byte[l2];
+ in.readBytes(b2, 0, l2);
+ InetAddress min = InetAddressPoint.decode(b1);
+ InetAddress max = InetAddressPoint.decode(b2);
return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, min, max);
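With min/max now always serialized and equals/hashCode defined on the base class, FieldStats instances compare by value. A minimal sketch, assuming the FieldStats.Long constructor shown in the hunks above (all numbers illustrative):

import org.elasticsearch.action.fieldstats.FieldStats;

public class FieldStatsEqualitySketch {
    public static void main(String[] args) {
        // Two stats built from the same values...
        FieldStats.Long a = new FieldStats.Long(10, 8, 12, 20, true, true, 1L, 9L);
        FieldStats.Long b = new FieldStats.Long(10, 8, 12, 20, true, true, 1L, 9L);
        // ...now compare equal and hash identically, so they can be used in
        // sets, as map keys, and in assertEquals-style round-trip tests.
        assert a.equals(b) && a.hashCode() == b.hashCode();
    }
}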
diff --git a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java
index 42c4ccc701..38dd10df96 100644
--- a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java
@@ -51,7 +51,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
private String parent;
private String preference;
- private String[] fields;
+ private String[] storedFields;
private FetchSourceContext fetchSourceContext;
@@ -61,7 +61,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
private VersionType versionType = VersionType.INTERNAL;
private long version = Versions.MATCH_ANY;
- private boolean ignoreErrorsOnGeneratedFields;
public GetRequest() {
type = "_all";
@@ -187,20 +186,20 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
}
/**
- * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
+ * Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
- public GetRequest fields(String... fields) {
- this.fields = fields;
+ public GetRequest storedFields(String... fields) {
+ this.storedFields = fields;
return this;
}
/**
- * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
+ * Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
- public String[] fields() {
- return this.fields;
+ public String[] storedFields() {
+ return this.storedFields;
}
/**
@@ -248,19 +247,10 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
return this;
}
- public GetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
- this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
- return this;
- }
-
public VersionType versionType() {
return this.versionType;
}
- public boolean ignoreErrorsOnGeneratedFields() {
- return ignoreErrorsOnGeneratedFields;
- }
-
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -270,19 +260,12 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
parent = in.readOptionalString();
preference = in.readOptionalString();
refresh = in.readBoolean();
- int size = in.readInt();
- if (size >= 0) {
- fields = new String[size];
- for (int i = 0; i < size; i++) {
- fields[i] = in.readString();
- }
- }
+ storedFields = in.readOptionalStringArray();
realtime = in.readBoolean();
- this.ignoreErrorsOnGeneratedFields = in.readBoolean();
this.versionType = VersionType.fromValue(in.readByte());
this.version = in.readLong();
- fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
+ fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
}
@Override
@@ -295,19 +278,11 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
out.writeOptionalString(preference);
out.writeBoolean(refresh);
- if (fields == null) {
- out.writeInt(-1);
- } else {
- out.writeInt(fields.length);
- for (String field : fields) {
- out.writeString(field);
- }
- }
+ out.writeOptionalStringArray(storedFields);
out.writeBoolean(realtime);
- out.writeBoolean(ignoreErrorsOnGeneratedFields);
out.writeByte(versionType.getValue());
out.writeLong(version);
- out.writeOptionalStreamable(fetchSourceContext);
+ out.writeOptionalWriteable(fetchSourceContext);
}
@Override
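For callers the rename is mechanical: stored fields are requested through storedFields(...), and the array now travels over the wire via writeOptionalStringArray instead of a hand-rolled length prefix, so a null array round-trips as null. A hedged usage sketch (index, type, id, and field names illustrative):

import org.elasticsearch.action.get.GetRequest;

class GetRequestSketch {
    static GetRequest ratedGet() {
        // was: new GetRequest("my-index", "my-type", "1").fields("rating", "counter")
        return new GetRequest("my-index", "my-type", "1")
                .storedFields("rating", "counter");
    }
}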
diff --git a/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java
index 7827de12ea..f56905d86e 100644
--- a/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java
@@ -88,8 +88,8 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
- public GetRequestBuilder setFields(String... fields) {
- request.fields(fields);
+ public GetRequestBuilder setStoredFields(String... fields) {
+ request.storedFields(fields);
return this;
}
@@ -155,11 +155,6 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
return this;
}
- public GetRequestBuilder setIgnoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
- request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
- return this;
- }
-
/**
* Sets the version, which will cause the get operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
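The builder mirrors the request-level rename; a hedged sketch, assuming a Client instance (names illustrative):

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;

class GetBuilderSketch {
    static GetResponse fetch(Client client) {
        return client.prepareGet("my-index", "my-type", "1")
                .setStoredFields("rating") // was: .setFields("rating")
                .get();
    }
}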
diff --git a/core/src/main/java/org/elasticsearch/action/get/GetResponse.java b/core/src/main/java/org/elasticsearch/action/get/GetResponse.java
index 5741984d35..87cc42f9d2 100644
--- a/core/src/main/java/org/elasticsearch/action/get/GetResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/get/GetResponse.java
@@ -134,14 +134,26 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
return getResult.getSource();
}
+ /**
+ * @deprecated Use {@link GetResponse#getSource()} instead
+ */
+ @Deprecated
public Map<String, GetField> getFields() {
return getResult.getFields();
}
+ /**
+ * @deprecated Use {@link GetResponse#getSource()} instead
+ */
+ @Deprecated
public GetField getField(String name) {
return getResult.field(name);
}
+ /**
+ * @deprecated Use {@link GetResponse#getSource()} instead
+ */
+ @Deprecated
@Override
public Iterator<GetField> iterator() {
return getResult.iterator();
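Migrating off the deprecated accessors is usually a one-liner against the _source map; a hedged sketch (field name illustrative):

import java.util.Map;

import org.elasticsearch.action.get.GetResponse;

class GetResponseSketch {
    static Object readRating(GetResponse response) {
        // Prefer the _source map over the deprecated getField/getFields accessors.
        Map<String, Object> source = response.getSource();
        return source == null ? null : source.get("rating");
    }
}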
diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
index 001e4ebd7a..1decd8ce94 100644
--- a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
@@ -28,6 +28,7 @@ import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -58,7 +59,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
private String id;
private String routing;
private String parent;
- private String[] fields;
+ private String[] storedFields;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
private FetchSourceContext fetchSourceContext;
@@ -136,13 +137,13 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
return parent;
}
- public Item fields(String... fields) {
- this.fields = fields;
+ public Item storedFields(String... fields) {
+ this.storedFields = fields;
return this;
}
- public String[] fields() {
- return this.fields;
+ public String[] storedFields() {
+ return this.storedFields;
}
public long version() {
@@ -188,17 +189,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
id = in.readString();
routing = in.readOptionalString();
parent = in.readOptionalString();
- int size = in.readVInt();
- if (size > 0) {
- fields = new String[size];
- for (int i = 0; i < size; i++) {
- fields[i] = in.readString();
- }
- }
+ storedFields = in.readOptionalStringArray();
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
- fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
+ fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
}
@Override
@@ -208,19 +203,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
out.writeString(id);
out.writeOptionalString(routing);
out.writeOptionalString(parent);
- if (fields == null) {
- out.writeVInt(0);
- } else {
- out.writeVInt(fields.length);
- for (String field : fields) {
- out.writeString(field);
- }
- }
-
+ out.writeOptionalStringArray(storedFields);
out.writeLong(version);
out.writeByte(versionType.getValue());
- out.writeOptionalStreamable(fetchSourceContext);
+ out.writeOptionalWriteable(fetchSourceContext);
}
@Override
@@ -233,7 +220,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
if (version != item.version) return false;
if (fetchSourceContext != null ? !fetchSourceContext.equals(item.fetchSourceContext) : item.fetchSourceContext != null)
return false;
- if (!Arrays.equals(fields, item.fields)) return false;
+ if (!Arrays.equals(storedFields, item.storedFields)) return false;
if (!id.equals(item.id)) return false;
if (!index.equals(item.index)) return false;
if (routing != null ? !routing.equals(item.routing) : item.routing != null) return false;
@@ -251,7 +238,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
result = 31 * result + id.hashCode();
result = 31 * result + (routing != null ? routing.hashCode() : 0);
result = 31 * result + (parent != null ? parent.hashCode() : 0);
- result = 31 * result + (fields != null ? Arrays.hashCode(fields) : 0);
+ result = 31 * result + (storedFields != null ? Arrays.hashCode(storedFields) : 0);
result = 31 * result + Long.hashCode(version);
result = 31 * result + versionType.hashCode();
result = 31 * result + (fetchSourceContext != null ? fetchSourceContext.hashCode() : 0);
@@ -262,8 +249,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
String preference;
boolean realtime = true;
boolean refresh;
- public boolean ignoreErrorsOnGeneratedFields = false;
-
List<Item> items = new ArrayList<>();
public List<Item> getItems() {
@@ -338,11 +323,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
- public MultiGetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
- this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
- return this;
- }
-
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
}
@@ -386,7 +366,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
String id = null;
String routing = defaultRouting;
String parent = null;
- List<String> fields = null;
+ List<String> storedFields = null;
long version = Versions.MATCH_ANY;
VersionType versionType = VersionType.INTERNAL;
@@ -410,8 +390,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
} else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
parent = parser.text();
} else if ("fields".equals(currentFieldName)) {
- fields = new ArrayList<>();
- fields.add(parser.text());
+ throw new ParsingException(parser.getTokenLocation(),
+ "Unsupported field [fields] used, expected [stored_fields] instead");
+ } else if ("stored_fields".equals(currentFieldName)) {
+ storedFields = new ArrayList<>();
+ storedFields.add(parser.text());
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
@@ -427,9 +410,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
- fields = new ArrayList<>();
+ throw new ParsingException(parser.getTokenLocation(),
+ "Unsupported field [fields] used, expected [stored_fields] instead");
+ } else if ("stored_fields".equals(currentFieldName)) {
+ storedFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- fields.add(parser.text());
+ storedFields.add(parser.text());
}
} else if ("_source".equals(currentFieldName)) {
ArrayList<String> includes = new ArrayList<>();
@@ -471,12 +457,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
}
String[] aFields;
- if (fields != null) {
- aFields = fields.toArray(new String[fields.size()]);
+ if (storedFields != null) {
+ aFields = storedFields.toArray(new String[storedFields.size()]);
} else {
aFields = defaultFields;
}
- items.add(new Item(index, type, id).routing(routing).fields(aFields).parent(parent).version(version).versionType(versionType)
+ items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version).versionType(versionType)
.fetchSourceContext(fetchSourceContext == null ? defaultFetchSource : fetchSourceContext));
}
}
@@ -491,7 +477,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
if (!token.isValue()) {
throw new IllegalArgumentException("ids array element should only contain ids");
}
- items.add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
+ items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
}
}
@@ -510,7 +496,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
preference = in.readOptionalString();
refresh = in.readBoolean();
realtime = in.readBoolean();
- ignoreErrorsOnGeneratedFields = in.readBoolean();
int size = in.readVInt();
items = new ArrayList<>(size);
@@ -525,7 +510,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
out.writeOptionalString(preference);
out.writeBoolean(refresh);
out.writeBoolean(realtime);
- out.writeBoolean(ignoreErrorsOnGeneratedFields);
out.writeVInt(items.size());
for (Item item : items) {
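The Item rename mirrors GetRequest on the Java side, while on the REST side a body that still uses the old "fields" key is now rejected with a ParsingException pointing at "stored_fields". A hedged sketch of the updated Java call (coordinates illustrative):

import org.elasticsearch.action.get.MultiGetRequest;

class MultiGetSketch {
    static MultiGetRequest twoDocs() {
        MultiGetRequest request = new MultiGetRequest();
        request.add(new MultiGetRequest.Item("my-index", "my-type", "1")
                .storedFields("rating")); // was: .fields("rating")
        request.add(new MultiGetRequest.Item("my-index", "my-type", "2"));
        return request;
    }
}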
diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java
index 6e32e1caf3..a2cb204d5e 100644
--- a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java
@@ -80,9 +80,4 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest
request.realtime(realtime);
return this;
}
-
- public MultiGetRequestBuilder setIgnoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
- request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
- return this;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java
index 47f07c5248..25a624b2eb 100644
--- a/core/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java
@@ -35,7 +35,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
private String preference;
boolean realtime = true;
boolean refresh;
- boolean ignoreErrorsOnGeneratedFields = false;
IntArrayList locations;
List<MultiGetRequest.Item> items;
@@ -52,7 +51,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
preference = multiGetRequest.preference;
realtime = multiGetRequest.realtime;
refresh = multiGetRequest.refresh;
- ignoreErrorsOnGeneratedFields = multiGetRequest.ignoreErrorsOnGeneratedFields;
}
@Override
@@ -87,11 +85,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
return this;
}
- public MultiGetShardRequest ignoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
- this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
- return this;
- }
-
public boolean refresh() {
return this.refresh;
}
@@ -130,7 +123,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
preference = in.readOptionalString();
refresh = in.readBoolean();
realtime = in.readBoolean();
- ignoreErrorsOnGeneratedFields = in.readBoolean();
}
@Override
@@ -146,11 +138,5 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
out.writeOptionalString(preference);
out.writeBoolean(refresh);
out.writeBoolean(realtime);
- out.writeBoolean(ignoreErrorsOnGeneratedFields);
-
- }
-
- public boolean ignoreErrorsOnGeneratedFields() {
- return ignoreErrorsOnGeneratedFields;
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java
index 240035aee2..6b9de7ecf6 100644
--- a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java
+++ b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java
@@ -92,8 +92,8 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
indexShard.refresh("refresh_flag_get");
}
- GetResult result = indexShard.getService().get(request.type(), request.id(), request.fields(),
- request.realtime(), request.version(), request.versionType(), request.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
+ GetResult result = indexShard.getService().get(request.type(), request.id(), request.storedFields(),
+ request.realtime(), request.version(), request.versionType(), request.fetchSourceContext());
return new GetResponse(result);
}
diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java
index d9e5eaa2e1..8353c5dc38 100644
--- a/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java
+++ b/core/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.get;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
@@ -86,13 +88,15 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
for (int i = 0; i < request.locations.size(); i++) {
MultiGetRequest.Item item = request.items.get(i);
try {
- GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(), item.versionType(), item.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
+ GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.storedFields(), request.realtime(), item.version(),
+ item.versionType(), item.fetchSourceContext());
response.add(request.locations.get(i), new GetResponse(getResult));
} catch (Exception e) {
if (TransportActions.isShardNotAvailableException(e)) {
throw (ElasticsearchException) e;
} else {
- logger.debug("{} failed to execute multi_get for [{}]/[{}]", e, shardId, item.type(), item.id());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
+ item.type(), item.id()), e);
response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
}
}
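The logging change here is the pattern applied throughout this merge: log4j 2 takes the Throwable as the trailing argument, and the message is wrapped in a Supplier so the ParameterizedMessage is only allocated when the level is actually enabled. A minimal sketch of the idiom:

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyLoggingSketch {
    static void logFailure(Logger logger, Exception e, Object shardId, String type, String id) {
        // Message construction is deferred; the exception goes in the trailing slot.
        logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                "{} failed to execute multi_get for [{}]/[{}]", shardId, type, id), e);
    }
}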
diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 910abf8728..cce0f6c8ee 100644
--- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -91,6 +91,17 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
private String pipeline;
+ /**
+     * Value for {@link #getAutoGeneratedTimestamp()} if the document has an externally
+     * provided ID.
+ */
+ public static final int UNSET_AUTO_GENERATED_TIMESTAMP = -1;
+
+ private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;
+
+ private boolean isRetry = false;
+
+
public IndexRequest() {
}
@@ -141,6 +152,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
}
+ if (opType() != OpType.INDEX && id == null) {
+ addValidationError("an id is required for a " + opType() + " operation", validationException);
+ }
+
if (!versionType.validateVersionForWrites(version)) {
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
}
@@ -155,6 +170,11 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
id.getBytes(StandardCharsets.UTF_8).length, validationException);
}
+
+ if (id == null && (versionType == VersionType.INTERNAL && version == Versions.MATCH_ANY) == false) {
+ validationException = addValidationError("an id must be provided if version type or value are set", validationException);
+ }
+
return validationException;
}
@@ -534,10 +554,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
// generate id if not already provided and id generation is allowed
- if (allowIdGeneration) {
- if (id == null) {
- id(UUIDs.base64UUID());
- }
+ if (allowIdGeneration && id == null) {
+ assert autoGeneratedTimestamp == -1;
+ autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
+ id(UUIDs.base64UUID());
}
// generate timestamp if not provided, we always have one post this stage...
@@ -584,6 +604,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
pipeline = in.readOptionalString();
+ isRetry = in.readBoolean();
+ autoGeneratedTimestamp = in.readLong();
}
@Override
@@ -600,6 +622,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
out.writeLong(version);
out.writeByte(versionType.getValue());
out.writeOptionalString(pipeline);
+ out.writeBoolean(isRetry);
+ out.writeLong(autoGeneratedTimestamp);
}
@Override
@@ -612,4 +636,25 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
return "index {[" + index + "][" + type + "][" + id + "], source[" + sSource + "]}";
}
+
+
+ /**
+ * Returns <code>true</code> if this request has been sent to a shard copy more than once.
+ */
+ public boolean isRetry() {
+ return isRetry;
+ }
+
+ @Override
+ public void onRetry() {
+ isRetry = true;
+ }
+
+ /**
+     * Returns the timestamp at which the auto-generated ID was created, or {@value #UNSET_AUTO_GENERATED_TIMESTAMP} if the
+     * document has no auto-generated timestamp. This method returns a positive value iff the id was auto-generated.
+ */
+ public long getAutoGeneratedTimestamp() {
+ return autoGeneratedTimestamp;
+ }
}
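A hedged sketch of how the two new fields behave before process(...) runs (coordinates illustrative):

import org.elasticsearch.action.index.IndexRequest;

class AutoIdSketch {
    static void compare() {
        // An externally supplied id never receives an auto-generated timestamp.
        IndexRequest explicit = new IndexRequest("my-index", "my-type", "1");
        assert explicit.getAutoGeneratedTimestamp() == IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;

        // Once process(...) generates a UUID id, getAutoGeneratedTimestamp() becomes >= 0,
        // and onRetry() marks re-sent copies so the engine can de-duplicate retried
        // auto-id appends.
        IndexRequest autoId = new IndexRequest("my-index", "my-type");
        assert autoId.id() == null && autoId.isRetry() == false;
    }
}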
diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
index 7d567b4bdb..a9d8bcaa56 100644
--- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
@@ -207,15 +207,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
}
/**
- * Sets a string representation of the {@link #setOpType(DocumentRequest.OpType)}. Can
- * be either "index" or "create".
- */
- public IndexRequestBuilder setOpType(String opType) {
- request.opType(opType);
- return this;
- }
-
- /**
* Set to <tt>true</tt> to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
*/
public IndexRequestBuilder setCreate(boolean create) {
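With the String overload removed, op types are set through the enum referenced in the deleted javadoc; a hedged sketch, assuming a Client instance and the DocumentRequest.OpType enum (source key/value illustrative):

import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;

class OpTypeSketch {
    static IndexResponse createOnly(Client client) {
        return client.prepareIndex("my-index", "my-type", "1")
                .setOpType(DocumentRequest.OpType.CREATE) // was: .setOpType("create")
                .setSource("field", "value")
                .get();
    }
}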
diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java
index 9167740567..e90c76590c 100644
--- a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java
@@ -20,6 +20,7 @@
package org.elasticsearch.action.index;
import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@@ -56,7 +57,7 @@ public class IndexResponse extends DocWriteResponse {
builder.append(",id=").append(getId());
builder.append(",version=").append(getVersion());
builder.append(",result=").append(getResult().getLowercase());
- builder.append(",shards=").append(getShardInfo());
+ builder.append(",shards=").append(Strings.toString(getShardInfo(), true));
return builder.append("]").toString();
}
diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
index 785a5d2315..cc3fbb7906 100644
--- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
@@ -158,7 +158,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
- final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType());
+ final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
@@ -171,7 +171,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
- return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType());
+ return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
}
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard,
@@ -188,7 +188,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
"Dynamic mappings are not available on the node that holds the primary yet");
}
}
- final boolean created = indexShard.index(operation);
+ indexShard.index(operation);
// update the version on request so it will happen on the replicas
final long version = operation.version();
@@ -197,7 +197,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
assert request.versionType().validateVersionForWrites(request.version());
- IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), created);
+ IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), operation.isCreated());
return new WriteResult<>(response, operation.getTranslogLocation());
}
}
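The created flag now lives on the engine operation itself rather than on the return value of indexShard.index(operation); a hedged sketch of the resulting call shape, with request and indexShard assumed in scope as in the method above:

// Sketch only; mirrors executeIndexRequestOnPrimary above.
Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
indexShard.index(operation);             // no longer returns the created flag
boolean created = operation.isCreated(); // the operation records it instead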
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java
index 779b80e1e6..70a117bf1f 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.ingest;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
@@ -91,7 +93,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {
executionService.executeIndexRequest(indexRequest, t -> {
- logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
listener.onFailure(t);
}, success -> {
// TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that
@@ -106,7 +108,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
long ingestStartTimeInNanos = System.nanoTime();
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
- logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", exception, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
bulkRequestModifier.markCurrentItemAsFailed(exception);
}, (exception) -> {
if (exception != null) {
diff --git a/core/src/main/java/org/elasticsearch/action/main/MainResponse.java b/core/src/main/java/org/elasticsearch/action/main/MainResponse.java
index 2403c3ee49..c156dcfc98 100644
--- a/core/src/main/java/org/elasticsearch/action/main/MainResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/main/MainResponse.java
@@ -35,16 +35,18 @@ public class MainResponse extends ActionResponse implements ToXContent {
private String nodeName;
private Version version;
private ClusterName clusterName;
+ private String clusterUuid;
private Build build;
private boolean available;
MainResponse() {
}
- public MainResponse(String nodeName, Version version, ClusterName clusterName, Build build, boolean available) {
+ public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build, boolean available) {
this.nodeName = nodeName;
this.version = version;
this.clusterName = clusterName;
+ this.clusterUuid = clusterUuid;
this.build = build;
this.available = available;
}
@@ -61,6 +63,10 @@ public class MainResponse extends ActionResponse implements ToXContent {
return clusterName;
}
+ public String getClusterUuid() {
+ return clusterUuid;
+ }
+
public Build getBuild() {
return build;
}
@@ -75,6 +81,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
out.writeString(nodeName);
Version.writeVersion(version, out);
clusterName.writeTo(out);
+ out.writeString(clusterUuid);
Build.writeBuild(build, out);
out.writeBoolean(available);
}
@@ -85,6 +92,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
nodeName = in.readString();
version = Version.readVersion(in);
clusterName = new ClusterName(in);
+ clusterUuid = in.readString();
build = Build.readBuild(in);
available = in.readBoolean();
}
@@ -94,6 +102,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
builder.startObject();
builder.field("name", nodeName);
builder.field("cluster_name", clusterName.value());
+ builder.field("cluster_uuid", clusterUuid);
builder.startObject("version")
.field("number", version.toString())
.field("build_hash", build.shortHash())
diff --git a/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java b/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java
index c37268a52d..368696a955 100644
--- a/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java
+++ b/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java
@@ -52,7 +52,7 @@ public class TransportMainAction extends HandledTransportAction<MainRequest, Mai
assert Node.NODE_NAME_SETTING.exists(settings);
final boolean available = clusterState.getBlocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE) == false;
listener.onResponse(
- new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(), Build.CURRENT,
- available));
+ new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(),
+ clusterState.metaData().clusterUUID(), Build.CURRENT, available));
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
index a61384f959..6cb68b8e9b 100644
--- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
@@ -20,8 +20,10 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
@@ -35,18 +37,14 @@ import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
-import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.List;
@@ -58,7 +56,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSear
abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
- protected final ESLogger logger;
+ protected final Logger logger;
protected final SearchTransportService searchTransportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
protected final SearchPhaseController searchPhaseController;
@@ -77,7 +75,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
private final Object shardFailuresMutex = new Object();
protected volatile ScoreDoc[] sortedShardDocs;
- protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
+ protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
ActionListener<SearchResponse> listener) {
@@ -191,7 +189,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
innerMoveToSecondPhase();
} catch (Exception e) {
if (logger.isDebugEnabled()) {
- logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "{}: Failed to execute [{}] while moving to second phase",
+ shardIt.shardId(),
+ request),
+ e);
}
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}
@@ -211,15 +214,21 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
if (totalOps.incrementAndGet() == expectedTotalOps) {
if (logger.isDebugEnabled()) {
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
- logger.debug("{}: Failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "{}: Failed to execute [{}]",
+ shard != null ? shard.shortSummary() :
+ shardIt.shardId(),
+ request),
+ e);
} else if (logger.isTraceEnabled()) {
- logger.trace("{}: Failed to execute [{}]", e, shard, request);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
}
}
final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
if (successfulOps.get() == 0) {
if (logger.isDebugEnabled()) {
- logger.debug("All shards failed for phase: [{}]", e, firstPhaseName());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e);
}
// no successful ops, raise an exception
@@ -236,10 +245,13 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
final ShardRouting nextShard = shardIt.nextOrNull();
final boolean lastShard = nextShard == null;
// trace log this exception
- if (logger.isTraceEnabled()) {
- logger.trace("{}: Failed to execute [{}] lastShard [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(),
- request, lastShard);
- }
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "{}: Failed to execute [{}] lastShard [{}]",
+ shard != null ? shard.shortSummary() : shardIt.shardId(),
+ request,
+ lastShard),
+ e);
if (!lastShard) {
try {
performFirstPhase(shardIndex, shardIt, nextShard);
@@ -251,8 +263,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// no more shards active, add a failure
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
- logger.debug("{}: Failed to execute [{}] lastShard [{}]", e,
- shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "{}: Failed to execute [{}] lastShard [{}]",
+ shard != null ? shard.shortSummary() :
+ shardIt.shardId(),
+ request,
+ lastShard),
+ e);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
index 8614d7b118..ba73b0f4be 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
@@ -19,15 +19,15 @@
package org.elasticsearch.action.search;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
@@ -43,7 +43,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
- SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
+ SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@@ -105,7 +105,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
- logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
index 9d8305cf6b..ccd646ae12 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
@@ -20,17 +20,17 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
@@ -50,7 +50,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
- SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
+ SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@@ -113,7 +113,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
- logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();
@@ -182,7 +182,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
SearchShardTarget shardTarget, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
- logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();
diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
index 411e559322..7306e645e0 100644
--- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
@@ -17,7 +17,7 @@
* under the License.
*/
-package org.elasticsearch.search.controller;
+package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.ObjectObjectHashMap;
@@ -67,7 +67,6 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
@@ -76,15 +75,12 @@ import java.util.stream.StreamSupport;
*/
public class SearchPhaseController extends AbstractComponent {
- public static final Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = new Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>>() {
- @Override
- public int compare(AtomicArray.Entry<? extends QuerySearchResultProvider> o1, AtomicArray.Entry<? extends QuerySearchResultProvider> o2) {
- int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index());
- if (i == 0) {
- i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id();
- }
- return i;
+ public static final Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = (o1, o2) -> {
+ int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index());
+ if (i == 0) {
+ i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id();
}
+ return i;
};
public static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
@@ -93,8 +89,7 @@ public class SearchPhaseController extends AbstractComponent {
private final ScriptService scriptService;
private final ClusterService clusterService;
- @Inject
- public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
+ SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
super(settings);
this.bigArrays = bigArrays;
this.scriptService = scriptService;
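The anonymous comparator class becomes a lambda with identical semantics. An equivalent formulation using Comparator combinators is sketched below for comparison; it is not what the commit uses, and it assumes shardTarget().index() returns a Comparable key:

import java.util.Comparator;

import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.query.QuerySearchResultProvider;

class OrderingSketch {
    static final Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> ORDERING =
            Comparator.comparing(
                    (AtomicArray.Entry<? extends QuerySearchResultProvider> e) ->
                            e.value.shardTarget().index())
                    .thenComparingInt(e -> e.value.shardTarget().shardId().id());
}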
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java
index fad4d60275..d799bc2676 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java
@@ -19,14 +19,12 @@
package org.elasticsearch.action.search;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
@@ -36,7 +34,7 @@ import java.io.IOException;
class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {
- SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
+ SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
index 5f90d291dd..6df2bb3f87 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java
@@ -20,17 +20,17 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
@@ -46,7 +46,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
- SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService,
+ SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@@ -115,7 +115,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
- logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();
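Aside on the logging idiom this change adopts throughout: with ESLogger replaced by Log4j 2's Logger, an exception no longer rides along as a format argument; instead the debug(Supplier<?>, Throwable) overload is used, deferring ParameterizedMessage construction until the level check passes. A minimal self-contained sketch (class name and message are illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LazyLoggingSketch {
        private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

        void onQueryFailure(long searchId, Exception e) {
            // The cast selects the (Supplier<?>, Throwable) overload; the lambda
            // body only runs when debug logging is actually enabled.
            logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                "[{}] Failed to execute query phase", searchId), e);
        }
    }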
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
index c03e904b6d..3c320447fe 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
@@ -250,14 +250,6 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
/**
- * Sets no fields to be loaded, resulting in only id and type to be returned per field.
- */
- public SearchRequestBuilder setNoStoredFields() {
- sourceBuilder().noStoredFields();
- return this;
- }
-
- /**
* Indicates whether the response should contain the stored _source for every hit
*/
public SearchRequestBuilder setFetchSource(boolean fetch) {
@@ -302,7 +294,6 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
/**
* Adds a stored field to load and return (note, it must be stored) as part of the search request.
- * If none are specified, the source of the document will be return.
*/
public SearchRequestBuilder addStoredField(String field) {
sourceBuilder().storedField(field);
@@ -380,9 +371,8 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
/**
- * Sets the stored fields to load and return as part of the search request. If none
- * are specified, the source of the document will be returned.
- *
+ * Adds stored fields to load and return (note, they must be stored) as part of the search request.
+ * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}.
* @deprecated Use {@link SearchRequestBuilder#storedFields(String...)} instead.
*/
@Deprecated
@@ -392,8 +382,8 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
/**
- * Sets the fields to load and return as part of the search request. If none
- * are specified, the source of the document will be returned.
+ * Adds stored fields to load and return (note, they must be stored) as part of the search request.
+ * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}.
*/
public SearchRequestBuilder storedFields(String... fields) {
sourceBuilder().storedFields(Arrays.asList(fields));
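A usage sketch for the clarified stored-fields API; the client instance, index name, and field names are illustrative, not part of this change:

    // Request specific stored fields; each must be marked store:true in the mapping.
    client.prepareSearch("my-index")
          .storedFields("title", "date")
          .get();

    // Per the new Javadoc, "_none_" suppresses stored fields entirely,
    // including _source and the metadata fields.
    client.prepareSearch("my-index")
          .storedFields("_none_")
          .get();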
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
index 72154f224d..2bdf7dc30f 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
@@ -19,15 +19,15 @@
package org.elasticsearch.action.search;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
@@ -40,7 +40,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
- private final ESLogger logger;
+ private final Logger logger;
private final SearchPhaseController searchPhaseController;
private final SearchTransportService searchTransportService;
private final SearchScrollRequest request;
@@ -52,7 +52,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final AtomicInteger successfulOps;
private final AtomicInteger counter;
- SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
+ SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
@@ -146,7 +146,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private void onPhaseFailure(Exception e, long searchId, int shardIndex) {
if (logger.isDebugEnabled()) {
- logger.debug("[{}] Failed to execute query phase", e, searchId);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
}
addShardFailure(shardIndex, new ShardSearchFailure(e));
successfulOps.decrementAndGet();
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
index d9f649a7a5..4024d3b5f3 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
@@ -20,15 +20,15 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
@@ -43,7 +43,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
- private final ESLogger logger;
+ private final Logger logger;
private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
private final SearchScrollRequest request;
@@ -56,7 +56,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private volatile ScoreDoc[] sortedShardDocs;
private final AtomicInteger successfulOps;
- SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
+ SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
@@ -146,7 +146,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
if (logger.isDebugEnabled()) {
- logger.debug("[{}] Failed to execute query phase", failure, searchId);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
}
addShardFailure(shardIndex, new ShardSearchFailure(failure));
successfulOps.decrementAndGet();
diff --git a/core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
index 8552d21b5c..0451a8920e 100644
--- a/core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
@@ -17,17 +17,15 @@
* under the License.
*/
-package org.elasticsearch.search.action;
+package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
-import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
@@ -45,9 +43,7 @@ import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.query.ScrollQuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
-import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@@ -73,37 +69,10 @@ public class SearchTransportService extends AbstractComponent {
public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]";
private final TransportService transportService;
- private final SearchService searchService;
- @Inject
- public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
+ SearchTransportService(Settings settings, TransportService transportService) {
super(settings);
this.transportService = transportService;
- this.searchService = searchService;
- transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
- new FreeContextTransportHandler<>());
- transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
- new FreeContextTransportHandler<>());
- transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME,
- new ClearScrollContextsTransportHandler());
- transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
- new SearchDfsTransportHandler());
- transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
- new SearchQueryTransportHandler());
- transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
- new SearchQueryByIdTransportHandler());
- transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
- new SearchQueryScrollTransportHandler());
- transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
- new SearchQueryFetchTransportHandler());
- transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
- new SearchQueryQueryFetchTransportHandler());
- transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
- new SearchQueryFetchScrollTransportHandler());
- transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
- new FetchByIdTransportHandler<>());
- transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
- new FetchByIdTransportHandler<>());
}
public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) {
@@ -127,8 +96,8 @@ public class SearchTransportService extends AbstractComponent {
}
public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener<TransportResponse> listener) {
- transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(),
- new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
+ transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE,
+ new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
}
public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request,
@@ -281,87 +250,66 @@ public class SearchTransportService extends AbstractComponent {
}
}
- class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest>
- implements TransportRequestHandler<FreeContextRequest> {
- @Override
- public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception {
- boolean freed = searchService.freeContext(request.id());
- channel.sendResponse(new SearchFreeContextResponse(freed));
- }
- }
-
- static class ClearScrollContextsRequest extends TransportRequest {
- }
-
- class ClearScrollContextsTransportHandler implements TransportRequestHandler<ClearScrollContextsRequest> {
- @Override
- public void messageReceived(ClearScrollContextsRequest request, TransportChannel channel) throws Exception {
- searchService.freeAllScrollContexts();
- channel.sendResponse(TransportResponse.Empty.INSTANCE);
- }
- }
-
- class SearchDfsTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
- @Override
- public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
- DfsSearchResult result = searchService.executeDfsPhase(request);
- channel.sendResponse(result);
- }
- }
-
- class SearchQueryTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
- @Override
- public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
- QuerySearchResultProvider result = searchService.executeQueryPhase(request);
- channel.sendResponse(result);
- }
- }
-
- class SearchQueryByIdTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
- @Override
- public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
- QuerySearchResult result = searchService.executeQueryPhase(request);
- channel.sendResponse(result);
- }
- }
-
- class SearchQueryScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
- @Override
- public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
- ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
- channel.sendResponse(result);
- }
- }
-
- class SearchQueryFetchTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
- @Override
- public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
- QueryFetchSearchResult result = searchService.executeFetchPhase(request);
- channel.sendResponse(result);
- }
- }
-
- class SearchQueryQueryFetchTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
- @Override
- public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
- QueryFetchSearchResult result = searchService.executeFetchPhase(request);
- channel.sendResponse(result);
- }
- }
-
- class FetchByIdTransportHandler<Request extends ShardFetchRequest> implements TransportRequestHandler<Request> {
- @Override
- public void messageReceived(Request request, TransportChannel channel) throws Exception {
- FetchSearchResult result = searchService.executeFetchPhase(request);
- channel.sendResponse(result);
- }
- }
-
- class SearchQueryFetchScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
- @Override
- public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
- ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
- channel.sendResponse(result);
- }
+ public static void registerRequestHandler(TransportService transportService, SearchService searchService) {
+ transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
+ (request, channel) -> {
+ boolean freed = searchService.freeContext(request.id());
+ channel.sendResponse(new SearchFreeContextResponse(freed));
+ });
+ transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
+ (request, channel) -> {
+ boolean freed = searchService.freeContext(request.id());
+ channel.sendResponse(new SearchFreeContextResponse(freed));
+ });
+ transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE,
+ ThreadPool.Names.SAME, (request, channel) -> {
+ searchService.freeAllScrollContexts();
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ });
+ transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ DfsSearchResult result = searchService.executeDfsPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ QuerySearchResultProvider result = searchService.executeQueryPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ QuerySearchResult result = searchService.executeQueryPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ FetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ });
+ transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
+ (request, channel) -> {
+ FetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ });
}
}
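Design note: SearchTransportService loses its @Inject constructor, so handler registration becomes an explicit one-time static call that callers make alongside construction, as TransportSearchAction does below. Condensed wiring sketch, taken from that change:

    // Construct the now guice-free service and register the shard-level handlers once.
    SearchTransportService searchTransportService = new SearchTransportService(settings, transportService);
    SearchTransportService.registerRequestHandler(transportService, searchService);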
diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
index 092b69fc93..ef3815b1b3 100644
--- a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.search;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@@ -30,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
-import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@@ -42,8 +43,6 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;
-/**
- */
public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {
private final ClusterService clusterService;
@@ -51,11 +50,11 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
@Inject
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
- ClusterService clusterService, SearchTransportService searchTransportService,
- ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+ ClusterService clusterService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
this.clusterService = clusterService;
- this.searchTransportService = searchTransportService;
+ this.searchTransportService = new SearchTransportService(settings, transportService);
}
@Override
@@ -144,7 +143,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
}
void onFailedFreedContext(Throwable e, DiscoveryNode node) {
- logger.warn("Clear SC failed on node[{}]", e, node);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
if (expectedOps.countDown()) {
listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
} else {
diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 8a33bff8f0..9b9ca48fc3 100644
--- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -29,10 +29,11 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -53,13 +54,14 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private final SearchPhaseController searchPhaseController;
@Inject
- public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
- TransportService transportService, SearchTransportService searchTransportService,
+ public TransportSearchAction(Settings settings, ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService,
+ TransportService transportService, SearchService searchService,
ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
indexNameExpressionResolver) {
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
- this.searchPhaseController = searchPhaseController;
- this.searchTransportService = searchTransportService;
+ this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
+ this.searchTransportService = new SearchTransportService(settings, transportService);
+ SearchTransportService.registerRequestHandler(transportService, searchService);
this.clusterService = clusterService;
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
index 485baaa022..9d2307f1b4 100644
--- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
@@ -26,8 +26,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -45,15 +45,14 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
private final SearchPhaseController searchPhaseController;
@Inject
- public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
- ClusterService clusterService, SearchTransportService searchTransportService,
- SearchPhaseController searchPhaseController,
- ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+ public TransportSearchScrollAction(Settings settings, BigArrays bigArrays, ThreadPool threadPool, ScriptService scriptService,
+ TransportService transportService, ClusterService clusterService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
SearchScrollRequest::new);
this.clusterService = clusterService;
- this.searchTransportService = searchTransportService;
- this.searchPhaseController = searchPhaseController;
+ this.searchTransportService = new SearchTransportService(settings, transportService);
+ this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java b/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java
index 78a61ebfe1..ee260ddd1e 100644
--- a/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java
+++ b/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java
@@ -19,9 +19,9 @@
package org.elasticsearch.action.support;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool;
@@ -33,7 +33,7 @@ import java.util.List;
*/
public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {
- private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
+ private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
final ThreadPool threadPool;
volatile Object listeners;
diff --git a/core/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java b/core/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java
index 9f3f8b9a5d..4d15639dbe 100644
--- a/core/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java
+++ b/core/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java
@@ -121,8 +121,24 @@ public final class ActiveShardCount implements Writeable {
}
/**
+ * Returns true iff the given number of active shards is enough to meet
+ * the required shard count represented by this instance. This method
+ * should only be invoked with {@link ActiveShardCount} objects created
+ * from {@link #from(int)}, or {@link #NONE} or {@link #ONE}.
+ */
+ public boolean enoughShardsActive(final int activeShardCount) {
+ if (this.value < 0) {
+ throw new IllegalStateException("not enough information to resolve to shard count");
+ }
+ if (activeShardCount < 0) {
+ throw new IllegalArgumentException("activeShardCount cannot be negative");
+ }
+ return this.value <= activeShardCount;
+ }
+
+ /**
* Returns true iff the given cluster state's routing table contains enough active
- * shards to meet the required shard count represented by this instance.
+ * shards for the given index to meet the required shard count represented by this instance.
*/
public boolean enoughShardsActive(final ClusterState clusterState, final String indexName) {
if (this == ActiveShardCount.NONE) {
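A worked example of the new count-based overload's contract (values illustrative). ALL and DEFAULT carry a negative sentinel internally, which is exactly what the value < 0 guard rejects and why the Javadoc restricts callers to from(int), NONE, and ONE:

    assert ActiveShardCount.from(2).enoughShardsActive(3);          // 2 <= 3
    assert ActiveShardCount.from(2).enoughShardsActive(1) == false; // 2 > 1
    assert ActiveShardCount.NONE.enoughShardsActive(0);             // 0 <= 0
    // ActiveShardCount.ALL.enoughShardsActive(5)  -> IllegalStateException
    // ActiveShardCount.ONE.enoughShardsActive(-1) -> IllegalArgumentException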
diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
index d4ddae7822..5c9152b475 100644
--- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
+++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
@@ -24,8 +24,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -41,16 +41,17 @@ import java.util.List;
public final class AutoCreateIndex {
public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING =
- new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope);
+ new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope, Property.Dynamic);
private final boolean dynamicMappingDisabled;
private final IndexNameExpressionResolver resolver;
- private final AutoCreate autoCreate;
+ private volatile AutoCreate autoCreate;
- public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
+ public AutoCreateIndex(Settings settings, ClusterSettings clusterSettings, IndexNameExpressionResolver resolver) {
this.resolver = resolver;
dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(AUTO_CREATE_INDEX_SETTING, this::setAutoCreate);
}
/**
@@ -64,6 +65,8 @@ public final class AutoCreateIndex {
* Should the index be auto created?
*/
public boolean shouldAutoCreate(String index, ClusterState state) {
+ // One volatile read, so that all checks are done against the same instance:
+ final AutoCreate autoCreate = this.autoCreate;
if (autoCreate.autoCreateIndex == false) {
return false;
}
@@ -87,7 +90,15 @@ public final class AutoCreateIndex {
return false;
}
- private static class AutoCreate {
+ AutoCreate getAutoCreate() {
+ return autoCreate;
+ }
+
+ void setAutoCreate(AutoCreate autoCreate) {
+ this.autoCreate = autoCreate;
+ }
+
+ static class AutoCreate {
private final boolean autoCreateIndex;
private final List<Tuple<String, Boolean>> expressions;
@@ -128,5 +139,13 @@ public final class AutoCreateIndex {
this.expressions = expressions;
this.autoCreateIndex = autoCreateIndex;
}
+
+ boolean isAutoCreateIndex() {
+ return autoCreateIndex;
+ }
+
+ List<Tuple<String, Boolean>> getExpressions() {
+ return expressions;
+ }
}
}
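With the setting now marked Dynamic and routed through addSettingsUpdateConsumer, it can be flipped at runtime. A sketch of a cluster-settings update that would now take effect without a restart (the client instance is assumed, not part of the diff):

    client.admin().cluster().prepareUpdateSettings()
          .setTransientSettings(Settings.builder().put("action.auto_create_index", false))
          .get();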
diff --git a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java
index b4033d6a9f..0a53b63b66 100644
--- a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.support;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
@@ -75,8 +76,13 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
try {
channel.sendResponse(e);
} catch (Exception e1) {
- logger.warn("Failed to send error response for action [{}] and request [{}]", e1,
- actionName, request);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "Failed to send error response for action [{}] and request [{}]",
+ actionName,
+ request),
+ e1);
}
}
});
diff --git a/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java
index 4ff7cdaa7b..759693e550 100644
--- a/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java
+++ b/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java
@@ -19,10 +19,12 @@
package org.elasticsearch.action.support;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.threadpool.ThreadPool;
@@ -39,12 +41,12 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
*/
public static class Wrapper {
- private final ESLogger logger;
+ private final Logger logger;
private final ThreadPool threadPool;
private final boolean threadedListener;
- public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) {
+ public Wrapper(Logger logger, Settings settings, ThreadPool threadPool) {
this.logger = logger;
this.threadPool = threadPool;
// Should the action listener be threaded or not by default. Action listeners are automatically threaded for
@@ -68,13 +70,13 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
}
}
- private final ESLogger logger;
+ private final Logger logger;
private final ThreadPool threadPool;
private final String executor;
private final ActionListener<Response> listener;
private final boolean forceExecution;
- public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
+ public ThreadedActionListener(Logger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
boolean forceExecution) {
this.logger = logger;
this.threadPool = threadPool;
@@ -118,7 +120,8 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
@Override
public void onFailure(Exception e) {
- logger.warn("failed to execute failure callback on [{}], failure [{}]", e, listener, e);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e);
}
});
}
diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
index 582878a427..7d1a091d6b 100644
--- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
@@ -27,7 +28,6 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;
@@ -165,9 +165,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final TransportAction<Request, Response> action;
private final AtomicInteger index = new AtomicInteger();
- private final ESLogger logger;
+ private final Logger logger;
- private RequestFilterChain(TransportAction<Request, Response> action, ESLogger logger) {
+ private RequestFilterChain(TransportAction<Request, Response> action, Logger logger) {
this.action = action;
this.logger = logger;
}
@@ -201,9 +201,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final ActionFilter[] filters;
private final AtomicInteger index;
- private final ESLogger logger;
+ private final Logger logger;
- private ResponseFilterChain(ActionFilter[] filters, ESLogger logger) {
+ private ResponseFilterChain(ActionFilter[] filters, Logger logger) {
this.filters = filters;
this.index = new AtomicInteger(filters.length);
this.logger = logger;
diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
index 826d76de83..87ef385a24 100644
--- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.ActionFilters;
@@ -37,10 +38,10 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.concurrent.atomic.AtomicInteger;
@@ -224,7 +225,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (e != null) {
if (logger.isTraceEnabled()) {
if (!TransportActions.isShardNotAvailableException(e)) {
- logger.trace("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "{}: failed to execute [{}]",
+ shard != null ? shard.shortSummary() : shardIt.shardId(),
+ request),
+ e);
}
}
}
@@ -233,7 +240,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (logger.isDebugEnabled()) {
if (e != null) {
if (!TransportActions.isShardNotAvailableException(e)) {
- logger.debug("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "{}: failed to execute [{}]",
+ shard != null ? shard.shortSummary() : shardIt.shardId(),
+ request),
+ e);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index cb5ba8788c..98c962b3ee 100644
--- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast.node;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.IndicesRequest;
@@ -46,13 +47,13 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -363,7 +364,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
String nodeId = node.getId();
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
- logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
}
// this is defensive to protect against the possibility of double invocation
@@ -441,11 +444,23 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
shardResults[shardIndex] = failure;
if (TransportActions.isShardNotAvailableException(e)) {
if (logger.isTraceEnabled()) {
- logger.trace("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "[{}] failed to execute operation for shard [{}]",
+ actionName,
+ shardRouting.shortSummary()),
+ e);
}
} else {
if (logger.isDebugEnabled()) {
- logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "[{}] failed to execute operation for shard [{}]",
+ actionName,
+ shardRouting.shortSummary()),
+ e);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
index 7d6d437573..a664c325a4 100644
--- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.master;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionResponse;
@@ -155,7 +156,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
public void onFailure(Exception t) {
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
- logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
+ logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
retry(t, MasterNodeChangePredicate.INSTANCE);
} else {
listener.onFailure(t);
@@ -209,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
@Override
public void onTimeout(TimeValue timeout) {
- logger.debug("timed out while retrying [{}] after failure (timeout [{}])", failure, actionName, timeout);
+ logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
listener.onFailure(new MasterNotDiscoveredException(failure));
}
}, changePredicate
diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
index 276484286b..3582f5f5aa 100644
--- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.nodes;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;
@@ -31,13 +32,13 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
@@ -238,7 +239,9 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
- logger.debug("failed to execute on node [{}]", t, nodeId);
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
index 070840ca2e..d541ef6a35 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.support.replication;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
@@ -25,11 +27,13 @@ import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@@ -40,17 +44,20 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
+import java.util.Objects;
+import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Supplier;
+import java.util.stream.Collectors;
public class ReplicationOperation<
Request extends ReplicationRequest<Request>,
ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest>
> {
- private final ESLogger logger;
+ private final Logger logger;
private final Request request;
private final Supplier<ClusterState> clusterStateSupplier;
private final String opType;
@@ -65,7 +72,7 @@ public class ReplicationOperation<
* operations and the primary finishes.</li>
* </ul>
*/
- private final AtomicInteger pendingShards = new AtomicInteger();
+ private final AtomicInteger pendingActions = new AtomicInteger();
private final AtomicInteger successfulShards = new AtomicInteger();
private final boolean executeOnReplicas;
private final Primary<Request, ReplicaRequest, PrimaryResultT> primary;
@@ -80,7 +87,7 @@ public class ReplicationOperation<
public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary,
ActionListener<PrimaryResultT> listener,
boolean executeOnReplicas, Replicas<ReplicaRequest> replicas,
- Supplier<ClusterState> clusterStateSupplier, ESLogger logger, String opType) {
+ Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
this.executeOnReplicas = executeOnReplicas;
this.replicasProxy = replicas;
this.primary = primary;
@@ -102,7 +109,7 @@ public class ReplicationOperation<
}
totalShards.incrementAndGet();
- pendingShards.incrementAndGet();
+ pendingActions.incrementAndGet();
primaryResult = primary.perform(request);
final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
@@ -110,19 +117,45 @@ public class ReplicationOperation<
logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
}
- performOnReplicas(primaryId, replicaRequest);
+ // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
+ // we have to make sure that every operation indexed into the primary after recovery start will also be replicated
+ // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
+ ClusterState clusterState = clusterStateSupplier.get();
+ final List<ShardRouting> shards = getShards(primaryId, clusterState);
+ Set<String> inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState);
+
+ markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards);
+
+ performOnReplicas(replicaRequest, shards);
successfulShards.incrementAndGet();
decPendingAndFinishIfNeeded();
}
- private void performOnReplicas(ShardId primaryId, ReplicaRequest replicaRequest) {
- // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
- // we have to make sure that every operation indexed into the primary after recovery start will also be replicated
- // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
- // If the index gets deleted after primary operation, we skip replication
- final List<ShardRouting> shards = getShards(primaryId, clusterStateSupplier.get());
+ private void markUnavailableShardsAsStale(ReplicaRequest replicaRequest, Set<String> inSyncAllocationIds, List<ShardRouting> shards) {
+ if (inSyncAllocationIds.isEmpty() == false && shards.isEmpty() == false) {
+ Set<String> availableAllocationIds = shards.stream()
+ .map(ShardRouting::allocationId)
+ .filter(Objects::nonNull)
+ .map(AllocationId::getId)
+ .collect(Collectors.toSet());
+
+ // if inSyncAllocationIds contains allocation ids of shards that don't exist in RoutingTable, mark copies as stale
+ for (String allocationId : Sets.difference(inSyncAllocationIds, availableAllocationIds)) {
+ // mark copy as stale
+ pendingActions.incrementAndGet();
+ replicasProxy.markShardCopyAsStale(replicaRequest.shardId(), allocationId, replicaRequest.primaryTerm(),
+ ReplicationOperation.this::decPendingAndFinishIfNeeded,
+ ReplicationOperation.this::onPrimaryDemoted,
+ throwable -> decPendingAndFinishIfNeeded()
+ );
+ }
+ }
+ }
+
+ private void performOnReplicas(ReplicaRequest replicaRequest, List<ShardRouting> shards) {
final String localNodeId = primary.routingEntry().currentNodeId();
+ // If the index gets deleted after primary operation, we skip replication
for (final ShardRouting shard : shards) {
if (executeOnReplicas == false || shard.unassigned()) {
if (shard.primary() == false) {
@@ -147,7 +180,7 @@ public class ReplicationOperation<
}
totalShards.incrementAndGet();
- pendingShards.incrementAndGet();
+ pendingActions.incrementAndGet();
replicasProxy.performOn(shard, replicaRequest, new ActionListener<TransportResponse.Empty>() {
@Override
public void onResponse(TransportResponse.Empty empty) {
@@ -157,8 +190,14 @@ public class ReplicationOperation<
@Override
public void onFailure(Exception replicaException) {
- logger.trace("[{}] failure while performing [{}] on replica {}, request [{}]", replicaException, shard.shardId(), opType,
- shard, replicaRequest);
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] failure while performing [{}] on replica {}, request [{}]",
+ shard.shardId(),
+ opType,
+ shard,
+ replicaRequest),
+ replicaException);
if (ignoreReplicaException(replicaException)) {
decPendingAndFinishIfNeeded();
} else {
@@ -166,7 +205,9 @@ public class ReplicationOperation<
shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(
shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
- logger.warn("[{}] {}", replicaException, shard.shardId(), message);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException);
replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
ReplicationOperation.this::decPendingAndFinishIfNeeded,
ReplicationOperation.this::onPrimaryDemoted,
@@ -222,6 +263,14 @@ public class ReplicationOperation<
}
}
+ protected Set<String> getInSyncAllocationIds(ShardId shardId, ClusterState clusterState) {
+ IndexMetaData indexMetaData = clusterState.metaData().index(shardId.getIndex());
+ if (indexMetaData != null) {
+ return indexMetaData.inSyncAllocationIds(shardId.id());
+ }
+ return Collections.emptySet();
+ }
+
protected List<ShardRouting> getShards(ShardId shardId, ClusterState state) {
// can be null if the index is deleted / closed on us...
final IndexShardRoutingTable shardRoutingTable = state.getRoutingTable().shardRoutingTableOrNull(shardId);
@@ -230,8 +279,8 @@ public class ReplicationOperation<
}
private void decPendingAndFinishIfNeeded() {
- assert pendingShards.get() > 0;
- if (pendingShards.decrementAndGet() == 0) {
+ assert pendingActions.get() > 0;
+ if (pendingActions.decrementAndGet() == 0) {
finish();
}
}
@@ -337,6 +386,20 @@ public class ReplicationOperation<
*/
void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure);
+
+ /**
+ * Marks shard copy as stale, removing its allocation id from the set of in-sync allocation ids.
+ *
+ * @param shardId shard id
+ * @param allocationId allocation id to remove from the set of in-sync allocation ids
+ * @param primaryTerm the primary term of the primary shard when requesting the failure
+ * @param onSuccess a callback to call when the allocation id has been successfully removed from the in-sync set.
+ * @param onPrimaryDemoted a callback to call when the request failed because the current primary was already demoted
+ * by the master.
+ * @param onIgnoredFailure a callback to call when the request failed, but the failure can be safely ignored.
+ */
+ void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
+ Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure);
}
public static class RetryOnPrimaryException extends ElasticsearchException {
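The new markUnavailableShardsAsStale step above boils down to a set difference: every in-sync allocation id that has no live entry in the routing table is reported to the master as stale. A minimal, self-contained sketch of that comparison (names are illustrative; the real code uses Sets.difference and reports through ReplicasProxy):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    // Sketch of the stale-copy detection in markUnavailableShardsAsStale:
    // any in-sync allocation id without a matching routing-table entry is stale.
    final class StaleCopySketch {
        static Set<String> staleAllocationIds(Set<String> inSyncIds, Set<String> availableIds) {
            Set<String> stale = new HashSet<>(inSyncIds);
            stale.removeAll(availableIds); // same result as Sets.difference(inSyncIds, availableIds)
            return stale;
        }

        public static void main(String[] args) {
            Set<String> inSync = new HashSet<>(Arrays.asList("a1", "a2", "a3"));
            Set<String> available = new HashSet<>(Arrays.asList("a1", "a3"));
            System.out.println(staleAllocationIds(inSync, available)); // prints [a2]
        }
    }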
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
index a1ddcdcedd..596d2581a7 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
@@ -248,4 +248,12 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
public String getDescription() {
return toString();
}
+
+ /**
+ * This method is called before this replication request is retried
+ * the first time.
+ */
+ public void onRetry() {
+ // nothing by default
+ }
}
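A hypothetical subclass showing how the onRetry() hook added above is intended to be used; the class name and field are illustrative, not part of the change:

    import org.elasticsearch.action.support.replication.ReplicationRequest;

    // Hypothetical subclass: the request flags itself before the transport layer
    // retries it, so downstream code can treat retried writes more defensively.
    abstract class TrackedReplicationRequest<R extends TrackedReplicationRequest<R>>
            extends ReplicationRequest<R> {

        private boolean retried = false;

        @Override
        public void onRetry() {
            retried = true;
        }

        public boolean isRetry() {
            return retried;
        }
    }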
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 8294ccfe0d..9587b4e6b2 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
@@ -37,10 +38,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
@@ -52,22 +55,26 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportChannelResponseHandler;
import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponse.Empty;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
+import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.function.Supplier;
@@ -114,9 +121,12 @@ public abstract class TransportReplicationAction<
this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]";
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler());
- transportService.registerRequestHandler(transportPrimaryAction, request, executor, new PrimaryOperationTransportHandler());
+ transportService.registerRequestHandler(transportPrimaryAction, () -> new ConcreteShardRequest<>(request), executor,
+ new PrimaryOperationTransportHandler());
// we must never reject because of thread pool capacity on replicas
- transportService.registerRequestHandler(transportReplicaAction, replicaRequest, executor, true, true,
+ transportService.registerRequestHandler(transportReplicaAction,
+ () -> new ConcreteShardRequest<>(replicaRequest),
+ executor, true, true,
new ReplicaOperationTransportHandler());
this.transportOptions = transportOptions();
@@ -162,7 +172,7 @@ public abstract class TransportReplicationAction<
/**
* Synchronous replica operation on nodes with replica copies. This is done under the lock from
- * {@link #acquireReplicaOperationLock(ShardId, long, ActionListener)}.
+ * {@link #acquireReplicaOperationLock(ShardId, long, String, ActionListener)}.
*/
protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest);
@@ -215,7 +225,9 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("Failed to send response for {}", inner, actionName);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
}
}
});
@@ -227,33 +239,36 @@ public abstract class TransportReplicationAction<
}
}
- class PrimaryOperationTransportHandler implements TransportRequestHandler<Request> {
+ class PrimaryOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<Request>> {
@Override
- public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
+ public void messageReceived(final ConcreteShardRequest<Request> request, final TransportChannel channel) throws Exception {
throw new UnsupportedOperationException("the task parameter is required for this operation");
}
@Override
- public void messageReceived(Request request, TransportChannel channel, Task task) {
- new AsyncPrimaryAction(request, channel, (ReplicationTask) task).run();
+ public void messageReceived(ConcreteShardRequest<Request> request, TransportChannel channel, Task task) {
+ new AsyncPrimaryAction(request.request, request.targetAllocationID, channel, (ReplicationTask) task).run();
}
}
class AsyncPrimaryAction extends AbstractRunnable implements ActionListener<PrimaryShardReference> {
private final Request request;
+ /** targetAllocationID of the shard this request is meant for */
+ private final String targetAllocationID;
private final TransportChannel channel;
private final ReplicationTask replicationTask;
- AsyncPrimaryAction(Request request, TransportChannel channel, ReplicationTask replicationTask) {
+ AsyncPrimaryAction(Request request, String targetAllocationID, TransportChannel channel, ReplicationTask replicationTask) {
this.request = request;
+ this.targetAllocationID = targetAllocationID;
this.channel = channel;
this.replicationTask = replicationTask;
}
@Override
protected void doRun() throws Exception {
- acquirePrimaryShardReference(request.shardId(), this);
+ acquirePrimaryShardReference(request.shardId(), targetAllocationID, this);
}
@Override
@@ -268,7 +283,9 @@ public abstract class TransportReplicationAction<
final ShardRouting primary = primaryShardReference.routingEntry();
assert primary.relocating() : "indexShard is marked as relocated but routing isn't: " + primary;
DiscoveryNode relocatingNode = clusterService.state().nodes().get(primary.relocatingNodeId());
- transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
+ transportService.sendRequest(relocatingNode, transportPrimaryAction,
+ new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId()),
+ transportOptions,
new TransportChannelResponseHandler<Response>(logger, channel, "rerouting indexing to target primary " + primary,
TransportReplicationAction.this::newResponseInstance) {
@@ -388,15 +405,17 @@ public abstract class TransportReplicationAction<
}
}
- class ReplicaOperationTransportHandler implements TransportRequestHandler<ReplicaRequest> {
+ class ReplicaOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<ReplicaRequest>> {
@Override
- public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception {
+ public void messageReceived(final ConcreteShardRequest<ReplicaRequest> request, final TransportChannel channel)
+ throws Exception {
throw new UnsupportedOperationException("the task parameter is required for this operation");
}
@Override
- public void messageReceived(ReplicaRequest request, TransportChannel channel, Task task) throws Exception {
- new AsyncReplicaAction(request, channel, (ReplicationTask) task).run();
+ public void messageReceived(ConcreteShardRequest<ReplicaRequest> requestWithAID, TransportChannel channel, Task task)
+ throws Exception {
+ new AsyncReplicaAction(requestWithAID.request, requestWithAID.targetAllocationID, channel, (ReplicationTask) task).run();
}
}
@@ -414,6 +433,8 @@ public abstract class TransportReplicationAction<
private final class AsyncReplicaAction extends AbstractRunnable implements ActionListener<Releasable> {
private final ReplicaRequest request;
+ // allocation id of the replica this request is meant for
+ private final String targetAllocationID;
private final TransportChannel channel;
/**
* The task on the node with the replica shard.
@@ -423,10 +444,11 @@ public abstract class TransportReplicationAction<
// something we want to avoid at all costs
private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
- AsyncReplicaAction(ReplicaRequest request, TransportChannel channel, ReplicationTask task) {
+ AsyncReplicaAction(ReplicaRequest request, String targetAllocationID, TransportChannel channel, ReplicationTask task) {
this.request = request;
this.channel = channel;
this.task = task;
+ this.targetAllocationID = targetAllocationID;
}
@Override
@@ -444,7 +466,13 @@ public abstract class TransportReplicationAction<
@Override
public void onFailure(Exception e) {
if (e instanceof RetryOnReplicaException) {
- logger.trace("Retrying operation on replica, action [{}], request [{}]", e, transportReplicaAction, request);
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "Retrying operation on replica, action [{}], request [{}]",
+ transportReplicaAction,
+ request),
+ e);
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@@ -455,7 +483,9 @@ public abstract class TransportReplicationAction<
String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
TransportChannelResponseHandler<TransportResponse.Empty> handler =
new TransportChannelResponseHandler<>(logger, channel, extraMessage, () -> TransportResponse.Empty.INSTANCE);
- transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
+ transportService.sendRequest(clusterService.localNode(), transportReplicaAction,
+ new ConcreteShardRequest<>(request, targetAllocationID),
+ handler);
}
@Override
@@ -479,7 +509,12 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (IOException responseException) {
responseException.addSuppressed(e);
- logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "failed to send error message back to client for action [{}]",
+ transportReplicaAction),
+ responseException);
}
}
@@ -487,7 +522,7 @@ public abstract class TransportReplicationAction<
protected void doRun() throws Exception {
setPhase(task, "replica");
assert request.shardId() != null : "request shardId must be set";
- acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), this);
+ acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), targetAllocationID, this);
}
/**
@@ -584,7 +619,7 @@ public abstract class TransportReplicationAction<
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ",
transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
}
- performAction(node, transportPrimaryAction, true);
+ performAction(node, transportPrimaryAction, true, new ConcreteShardRequest<>(request, primary.allocationId().getId()));
}
private void performRemoteAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
@@ -606,7 +641,7 @@ public abstract class TransportReplicationAction<
request.shardId(), request, state.version(), primary.currentNodeId());
}
setPhase(task, "rerouted");
- performAction(node, actionName, false);
+ performAction(node, actionName, false, request);
}
private boolean retryIfUnavailable(ClusterState state, ShardRouting primary) {
@@ -657,8 +692,9 @@ public abstract class TransportReplicationAction<
}
}
- private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction) {
- transportService.sendRequest(node, action, request, transportOptions, new TransportResponseHandler<Response>() {
+ private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction,
+ final TransportRequest requestToPerform) {
+ transportService.sendRequest(node, action, requestToPerform, transportOptions, new TransportResponseHandler<Response>() {
@Override
public Response newInstance() {
@@ -682,8 +718,12 @@ public abstract class TransportReplicationAction<
final Throwable cause = exp.unwrapCause();
if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
(isPrimaryAction && retryPrimaryException(cause))) {
- logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(),
- request);
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "received an error from node [{}] for request [{}], scheduling a retry",
+ node.getId(),
+ requestToPerform),
+ exp);
retry(exp);
} else {
finishAsFailed(exp);
@@ -704,6 +744,7 @@ public abstract class TransportReplicationAction<
return;
}
setPhase(task, "waiting_for_retry");
+ request.onRetry();
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@@ -729,7 +770,9 @@ public abstract class TransportReplicationAction<
void finishAsFailed(Exception failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
- logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request);
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
listener.onFailure(failure);
} else {
assert false : "finishAsFailed called but operation is already finished";
@@ -737,7 +780,13 @@ public abstract class TransportReplicationAction<
}
void finishWithUnexpectedFailure(Exception failure) {
- logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "unexpected error during the primary phase for action [{}], request [{}]",
+ actionName,
+ request),
+ failure);
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
listener.onFailure(failure);
@@ -767,7 +816,8 @@ public abstract class TransportReplicationAction<
* tries to acquire reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
* and replication of the operation to all replica shards is completed / failed (see {@link ReplicationOperation}).
*/
- protected void acquirePrimaryShardReference(ShardId shardId, ActionListener<PrimaryShardReference> onReferenceAcquired) {
+ protected void acquirePrimaryShardReference(ShardId shardId, String allocationId,
+ ActionListener<PrimaryShardReference> onReferenceAcquired) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
// we may end up here if the cluster state used to route the primary is so stale that the underlying
@@ -777,6 +827,10 @@ public abstract class TransportReplicationAction<
throw new ReplicationOperation.RetryOnPrimaryException(indexShard.shardId(),
"actual shard is not a primary " + indexShard.routingEntry());
}
+ final String actualAllocationId = indexShard.routingEntry().allocationId().getId();
+ if (actualAllocationId.equals(allocationId) == false) {
+ throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
+ }
ActionListener<Releasable> onAcquired = new ActionListener<Releasable>() {
@Override
@@ -796,9 +850,14 @@ public abstract class TransportReplicationAction<
/**
* tries to acquire an operation on replicas. The lock is closed as soon as replication is completed on the node.
*/
- protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, ActionListener<Releasable> onLockAcquired) {
+ protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, final String allocationId,
+ ActionListener<Releasable> onLockAcquired) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
+ final String actualAllocationId = indexShard.routingEntry().allocationId().getId();
+ if (actualAllocationId.equals(allocationId) == false) {
+ throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
+ }
indexShard.acquireReplicaOperationLock(primaryTerm, onLockAcquired, executor);
}
@@ -861,35 +920,112 @@ public abstract class TransportReplicationAction<
listener.onFailure(new NoNodeAvailableException("unknown node [" + nodeId + "]"));
return;
}
- transportService.sendRequest(node, transportReplicaAction, request, transportOptions,
+ transportService.sendRequest(node, transportReplicaAction,
+ new ConcreteShardRequest<>(request, replica.allocationId().getId()), transportOptions,
new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
}
@Override
public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception,
- Runnable onSuccess, Consumer<Exception> onFailure, Consumer<Exception> onIgnoredFailure) {
- shardStateAction.remoteShardFailed(
- replica, primaryTerm, message, exception,
- new ShardStateAction.Listener() {
- @Override
- public void onSuccess() {
- onSuccess.run();
- }
+ Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+ shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, message, exception,
+ createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
+ }
- @Override
- public void onFailure(Exception shardFailedError) {
- if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
- onFailure.accept(shardFailedError);
- } else {
- // these can occur if the node is shutting down and are okay
- // any other exception here is not expected and merits investigation
- assert shardFailedError instanceof TransportException ||
- shardFailedError instanceof NodeClosedException : shardFailedError;
- onIgnoredFailure.accept(shardFailedError);
- }
+ @Override
+ public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
+ Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+ shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, "mark copy as stale", null,
+ createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
+ }
+
+ private ShardStateAction.Listener createListener(final Runnable onSuccess, final Consumer<Exception> onPrimaryDemoted,
+ final Consumer<Exception> onIgnoredFailure) {
+ return new ShardStateAction.Listener() {
+ @Override
+ public void onSuccess() {
+ onSuccess.run();
+ }
+
+ @Override
+ public void onFailure(Exception shardFailedError) {
+ if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
+ onPrimaryDemoted.accept(shardFailedError);
+ } else {
+ // these can occur if the node is shutting down and are okay
+ // any other exception here is not expected and merits investigation
+ assert shardFailedError instanceof TransportException ||
+ shardFailedError instanceof NodeClosedException : shardFailedError;
+ onIgnoredFailure.accept(shardFailedError);
}
}
- );
+ };
+ }
+ }
+
+ /** a wrapper class to encapsulate a request when being sent to a specific allocation id */
+ public static final class ConcreteShardRequest<R extends TransportRequest> extends TransportRequest {
+
+ /** {@link AllocationId#getId()} of the shard this request is sent to **/
+ private String targetAllocationID;
+
+ private R request;
+
+ ConcreteShardRequest(Supplier<R> requestSupplier) {
+ request = requestSupplier.get();
+ // null now, but will be populated by reading from the streams
+ targetAllocationID = null;
+ }
+
+ ConcreteShardRequest(R request, String targetAllocationID) {
+ Objects.requireNonNull(request);
+ Objects.requireNonNull(targetAllocationID);
+ this.request = request;
+ this.targetAllocationID = targetAllocationID;
+ }
+
+ @Override
+ public void setParentTask(String parentTaskNode, long parentTaskId) {
+ request.setParentTask(parentTaskNode, parentTaskId);
+ }
+
+ @Override
+ public void setParentTask(TaskId taskId) {
+ request.setParentTask(taskId);
+ }
+
+ @Override
+ public TaskId getParentTask() {
+ return request.getParentTask();
+ }
+ @Override
+ public Task createTask(long id, String type, String action, TaskId parentTaskId) {
+ return request.createTask(id, type, action, parentTaskId);
+ }
+
+ @Override
+ public String getDescription() {
+ return "[" + request.getDescription() + "] for aID [" + targetAllocationID + "]";
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ targetAllocationID = in.readString();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(targetAllocationID);
+ request.writeTo(out);
+ }
+
+ public R getRequest() {
+ return request;
+ }
+
+ public String getTargetAllocationID() {
+ return targetAllocationID;
}
}
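The ConcreteShardRequest wrapper puts the target allocation id first on the wire, so a node can check that a request is really meant for its copy of the shard before acting on it. A simplified stand-in using plain DataOutput/DataInput in place of StreamOutput/StreamInput (the "expected-id" value and class name are illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Simplified framing sketch: allocation id first, wrapped request body second,
    // mirroring ConcreteShardRequest.writeTo/readFrom above.
    final class ConcreteShardRequestFramingSketch {
        static byte[] frame(String targetAllocationId, byte[] requestBody) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeUTF(targetAllocationId); // the allocation id goes on the wire first
                out.write(requestBody);           // then the wrapped request
            }
            return bytes.toByteArray();
        }

        public static void main(String[] args) throws IOException {
            byte[] framed = frame("xyz42", new byte[] {1, 2, 3});
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(framed))) {
                String allocationId = in.readUTF();
                // a receiving shard rejects requests aimed at another copy, as
                // acquirePrimaryShardReference/acquireReplicaOperationLock do above
                if ("expected-id".equals(allocationId) == false) {
                    System.out.println("reject: request was meant for aID [" + allocationId + "]");
                }
            }
        }
    }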
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
index 9261bea945..bf2b3235b1 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
@@ -27,7 +28,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
@@ -39,6 +39,9 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
/**
@@ -128,29 +131,38 @@ public abstract class TransportWriteAction<
* We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the
* refresh in parallel on the primary and on the replica.
*/
- postWriteActions(indexShard, request, location, this, logger);
+ new AsyncAfterWriteAction(indexShard, request, location, this, logger).run();
}
@Override
public synchronized void respond(ActionListener<Response> listener) {
this.listener = listener;
- respondIfPossible();
+ respondIfPossible(null);
}
/**
* Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}.
*/
- protected void respondIfPossible() {
+ protected void respondIfPossible(Exception ex) {
if (finishedAsyncActions && listener != null) {
- super.respond(listener);
+ if (ex == null) {
+ super.respond(listener);
+ } else {
+ listener.onFailure(ex);
+ }
}
}
+ public synchronized void onFailure(Exception exception) {
+ finishedAsyncActions = true;
+ respondIfPossible(exception);
+ }
+
@Override
- public synchronized void respondAfterAsyncAction(boolean forcedRefresh) {
+ public synchronized void onSuccess(boolean forcedRefresh) {
finalResponse.setForcedRefresh(forcedRefresh);
finishedAsyncActions = true;
- respondIfPossible();
+ respondIfPossible(null);
}
}
@@ -162,68 +174,144 @@ public abstract class TransportWriteAction<
private ActionListener<TransportResponse.Empty> listener;
public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest<?> request, Translog.Location location) {
- postWriteActions(indexShard, request, location, this, logger);
+ new AsyncAfterWriteAction(indexShard, request, location, this, logger).run();
}
@Override
public void respond(ActionListener<TransportResponse.Empty> listener) {
this.listener = listener;
- respondIfPossible();
+ respondIfPossible(null);
}
/**
* Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}.
*/
- protected void respondIfPossible() {
+ protected void respondIfPossible(Exception ex) {
if (finishedAsyncActions && listener != null) {
- super.respond(listener);
+ if (ex == null) {
+ super.respond(listener);
+ } else {
+ listener.onFailure(ex);
+ }
}
}
@Override
- public synchronized void respondAfterAsyncAction(boolean forcedRefresh) {
+ public void onFailure(Exception ex) {
finishedAsyncActions = true;
- respondIfPossible();
+ respondIfPossible(ex);
+ }
+
+ @Override
+ public synchronized void onSuccess(boolean forcedRefresh) {
+ finishedAsyncActions = true;
+ respondIfPossible(null);
}
}
- private interface RespondingWriteResult {
- void respondAfterAsyncAction(boolean forcedRefresh);
+ /**
+ * callback used by {@link AsyncAfterWriteAction} to notify that all post-write
+ * actions have been executed
+ */
+ interface RespondingWriteResult {
+ /**
+ * Called on successful processing of all post write actions
+ * @param forcedRefresh <code>true</code> iff this write has caused a refresh
+ */
+ void onSuccess(boolean forcedRefresh);
+
+ /**
+ * Called on failure if a post action failed.
+ */
+ void onFailure(Exception ex);
}
- static void postWriteActions(final IndexShard indexShard,
- final WriteRequest<?> request,
- @Nullable final Translog.Location location,
- final RespondingWriteResult respond,
- final ESLogger logger) {
- boolean pendingOps = false;
- boolean immediateRefresh = false;
- switch (request.getRefreshPolicy()) {
- case IMMEDIATE:
- indexShard.refresh("refresh_flag_index");
- immediateRefresh = true;
- break;
- case WAIT_UNTIL:
- if (location != null) {
- pendingOps = true;
- indexShard.addRefreshListener(location, forcedRefresh -> {
- if (forcedRefresh) {
- logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
- }
- respond.respondAfterAsyncAction(forcedRefresh);
- });
- }
- break;
- case NONE:
- break;
+ /**
+ * This class encapsulates post-write actions like asynchronously waiting for
+ * a translog sync or for a refresh to happen that makes the write operation
+ * visible.
+ */
+ static final class AsyncAfterWriteAction {
+ private final Location location;
+ private final boolean waitUntilRefresh;
+ private final boolean sync;
+ private final AtomicInteger pendingOps = new AtomicInteger(1);
+ private final AtomicBoolean refreshed = new AtomicBoolean(false);
+ private final AtomicReference<Exception> syncFailure = new AtomicReference<>(null);
+ private final RespondingWriteResult respond;
+ private final IndexShard indexShard;
+ private final WriteRequest<?> request;
+ private final Logger logger;
+
+ AsyncAfterWriteAction(final IndexShard indexShard,
+ final WriteRequest<?> request,
+ @Nullable final Translog.Location location,
+ final RespondingWriteResult respond,
+ final Logger logger) {
+ this.indexShard = indexShard;
+ this.request = request;
+ boolean waitUntilRefresh = false;
+ switch (request.getRefreshPolicy()) {
+ case IMMEDIATE:
+ indexShard.refresh("refresh_flag_index");
+ refreshed.set(true);
+ break;
+ case WAIT_UNTIL:
+ if (location != null) {
+ waitUntilRefresh = true;
+ pendingOps.incrementAndGet();
+ }
+ break;
+ case NONE:
+ break;
+ default:
+ throw new IllegalArgumentException("unknown refresh policy: " + request.getRefreshPolicy());
+ }
+ this.waitUntilRefresh = waitUntilRefresh;
+ this.respond = respond;
+ this.location = location;
+ if ((sync = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null)) {
+ pendingOps.incrementAndGet();
+ }
+ this.logger = logger;
+ assert pendingOps.get() >= 0 && pendingOps.get() <= 3 : "pendingOps was: " + pendingOps.get();
}
- boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null;
- if (fsyncTranslog) {
- indexShard.sync(location);
+
+ /** calls the response listener if all pending operations have returned; otherwise it just decrements the pending ops counter. */
+ private void maybeFinish() {
+ final int numPending = pendingOps.decrementAndGet();
+ if (numPending == 0) {
+ if (syncFailure.get() != null) {
+ respond.onFailure(syncFailure.get());
+ } else {
+ respond.onSuccess(refreshed.get());
+ }
+ }
+ assert numPending >= 0 && numPending <= 2 : "numPending must be either 2, 1 or 0 but was " + numPending;
}
- indexShard.maybeFlush();
- if (pendingOps == false) {
- respond.respondAfterAsyncAction(immediateRefresh);
+
+ void run() {
+ // we either respond immediately, i.e. if we don't fsync per request or wait for a refresh,
+ // or we pass the async operations on and wait for them to return before responding.
+ indexShard.maybeFlush();
+ maybeFinish(); // decrement pendingOps by one; if there is nothing else to do we just respond with success.
+ if (waitUntilRefresh) {
+ assert pendingOps.get() > 0;
+ indexShard.addRefreshListener(location, forcedRefresh -> {
+ if (forcedRefresh) {
+ logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
+ }
+ refreshed.set(forcedRefresh);
+ maybeFinish();
+ });
+ }
+ if (sync) {
+ assert pendingOps.get() > 0;
+ indexShard.sync(location, (ex) -> {
+ syncFailure.set(ex);
+ maybeFinish();
+ });
+ }
}
}
}
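AsyncAfterWriteAction coordinates up to three completions (the synchronous part of run(), an optional refresh wait, an optional translog sync) through a single pendingOps counter, and whoever performs the final decrement delivers the response exactly once. A minimal sketch of the same countdown pattern, with illustrative names:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicReference;

    // Countdown sketch: pendingOps starts at 1 for the synchronous step and each
    // async post-write action adds one; the last decrement reports the outcome.
    final class CountdownSketch {
        private final AtomicInteger pendingOps = new AtomicInteger(1);
        private final AtomicReference<Exception> failure = new AtomicReference<>(null);

        void register() {             // called once per pending async post-write action
            pendingOps.incrementAndGet();
        }

        void onOpDone(Exception ex) { // ex == null means the action succeeded
            if (ex != null) {
                failure.compareAndSet(null, ex);
            }
            maybeFinish();
        }

        void maybeFinish() {
            if (pendingOps.decrementAndGet() == 0) {
                Exception ex = failure.get();
                System.out.println(ex == null ? "respond: success" : "respond: " + ex);
            }
        }

        public static void main(String[] args) {
            CountdownSketch sketch = new CountdownSketch();
            sketch.register();    // e.g. a translog sync is pending
            sketch.maybeFinish(); // the synchronous part of run() is done
            sketch.onOpDone(null); // async sync completed -> responds exactly once
        }
    }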
diff --git a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
index a3964b6bbf..8981caa60f 100644
--- a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.single.shard;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.NoShardAvailableActionException;
@@ -39,10 +40,10 @@ import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.function.Supplier;
@@ -187,7 +188,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
private void onFailure(ShardRouting shardRouting, Exception e) {
if (logger.isTraceEnabled() && e != null) {
- logger.trace("{}: failed to execute [{}]", e, shardRouting, internalRequest.request());
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
}
perform(e);
}
@@ -205,7 +208,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
} else {
if (logger.isDebugEnabled()) {
- logger.debug("{}: failed to execute [{}]", failure, null, internalRequest.request());
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
}
}
listener.onFailure(failure);
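The recurring logging hunks in these files all follow the same Log4j 2 migration: the Throwable moves from the leading argument (old ESLogger style) to the trailing one, and the message is built lazily through a Supplier so ParameterizedMessage formatting is skipped when the level is disabled. A sketch of the target pattern (class and method names are illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    // Lazy message construction: the lambda only runs if debug is enabled,
    // and the exception is passed last so its stack trace is logged.
    final class LazyLoggingSketch {
        private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

        static void logFailure(String nodeId, Exception e) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                "failed to execute on node [{}]", nodeId), e);
        }
    }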
diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
index a30d9c1f25..6752ccd729 100644
--- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.tasks;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@@ -38,7 +39,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
@@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -275,7 +276,9 @@ public abstract class TransportTasksAction<
private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
- logger.debug("failed to execute on node [{}]", t, nodeId);
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
index 1dd9b0c7d7..b83713e3a6 100644
--- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
@@ -179,7 +179,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
super(item.index());
this.id = item.id();
this.type = item.type();
- this.selectedFields(item.fields());
+ this.selectedFields(item.storedFields());
this.routing(item.routing());
this.parent(item.parent());
}
diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
index da12831f1c..8c1d06113d 100644
--- a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
+++ b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action.termvectors;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
@@ -87,7 +89,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
if (TransportActions.isShardNotAvailableException(t)) {
throw (ElasticsearchException) t;
} else {
- logger.debug("{} failed to execute multi term vectors for [{}]/[{}]", t, shardId, termVectorsRequest.type(), termVectorsRequest.id());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
response.add(request.locations.get(i),
new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
}
diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
index d35c7bdb58..e5322f51d5 100644
--- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
@@ -186,7 +186,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
@Override
public void onResponse(IndexResponse response) {
UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
- if (request.fields() != null && request.fields().length > 0) {
+ if ((request.fetchSource() != null && request.fetchSource().fetchSource()) ||
+ (request.fields() != null && request.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
} else {
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
index 10b508d9a1..4920647053 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.update;
+import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
@@ -28,9 +29,11 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@@ -51,6 +54,7 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.lookup.SourceLookup;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -76,7 +80,7 @@ public class UpdateHelper extends AbstractComponent {
public Result prepare(UpdateRequest request, IndexShard indexShard) {
final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
- true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);
+ true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
return prepare(indexShard.shardId(), request, getResult);
}
@@ -139,12 +143,7 @@ public class UpdateHelper extends AbstractComponent {
return new Result(indexRequest, DocWriteResponse.Result.CREATED, null, null);
}
- long updateVersion = getResult.getVersion();
-
- if (request.versionType() != VersionType.INTERNAL) {
- assert request.versionType() == VersionType.FORCE;
- updateVersion = request.version(); // remember, match_any is excluded by the conflict test
- }
+ final long updateVersion = getResult.getVersion();
if (getResult.internalSourceRef() == null) {
// no source, we can't do anything, throw a failure...
@@ -272,17 +271,19 @@ public class UpdateHelper extends AbstractComponent {
}
/**
- * Extracts the fields from the updated document to be returned in a update response
+ * Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in an update response.
+ * For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in an update response.
*/
public GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, final Map<String, Object> source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) {
- if (request.fields() == null || request.fields().length == 0) {
+ if ((request.fields() == null || request.fields().length == 0) &&
+ (request.fetchSource() == null || request.fetchSource().fetchSource() == false)) {
return null;
}
+ SourceLookup sourceLookup = new SourceLookup();
+ sourceLookup.setSource(source);
boolean sourceRequested = false;
Map<String, GetField> fields = null;
if (request.fields() != null && request.fields().length > 0) {
- SourceLookup sourceLookup = new SourceLookup();
- sourceLookup.setSource(source);
for (String field : request.fields()) {
if (field.equals("_source")) {
sourceRequested = true;
@@ -303,8 +304,26 @@ public class UpdateHelper extends AbstractComponent {
}
}
+ BytesReference sourceFilteredAsBytes = sourceAsBytes;
+ if (request.fetchSource() != null && request.fetchSource().fetchSource()) {
+ sourceRequested = true;
+ if (request.fetchSource().includes().length > 0 || request.fetchSource().excludes().length > 0) {
+ Object value = sourceLookup.filter(request.fetchSource().includes(), request.fetchSource().excludes());
+ try {
+ final int initialCapacity = Math.min(1024, sourceAsBytes.length());
+ BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
+ try (XContentBuilder builder = new XContentBuilder(sourceContentType.xContent(), streamOutput)) {
+ builder.value(value);
+ sourceFilteredAsBytes = builder.bytes();
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchException("Error filtering source", e);
+ }
+ }
+ }
+
// TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType)
- return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceAsBytes : null, fields);
+ return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceFilteredAsBytes : null, fields);
}
public static class Result {
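The new extractGetResult path filters the updated _source before re-serializing it into the response. A simplified sketch of the filtering step, handling plain top-level field names only (the real SourceLookup.filter also supports dotted paths and wildcards):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Include/exclude filtering over a source map: an empty include list means
    // "everything", and excludes always win, as in _source filtering.
    final class SourceFilterSketch {
        static Map<String, Object> filter(Map<String, Object> source, String[] includes, String[] excludes) {
            Map<String, Object> filtered = new LinkedHashMap<>();
            for (Map.Entry<String, Object> entry : source.entrySet()) {
                boolean included = includes.length == 0;
                for (String inc : includes) {
                    if (entry.getKey().equals(inc)) { included = true; break; }
                }
                for (String exc : excludes) {
                    if (entry.getKey().equals(exc)) { included = false; break; }
                }
                if (included) {
                    filtered.put(entry.getKey(), entry.getValue());
                }
            }
            return filtered;
        }

        public static void main(String[] args) {
            Map<String, Object> source = new LinkedHashMap<>();
            source.put("title", "doc");
            source.put("secret", "hide-me");
            System.out.println(filter(source, new String[0], new String[] {"secret"})); // {title=doc}
        }
    }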
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index 662d26117b..839f6a9d9a 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -32,6 +32,8 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -42,6 +44,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.Collections;
@@ -55,6 +58,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
*/
public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
implements DocumentRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
+ private static final DeprecationLogger DEPRECATION_LOGGER =
+ new DeprecationLogger(Loggers.getLogger(UpdateRequest.class));
private String type;
private String id;
@@ -68,6 +73,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
Script script;
private String[] fields;
+ private FetchSourceContext fetchSourceContext;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
@@ -106,8 +112,9 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
validationException = addValidationError("id is missing", validationException);
}
- if (!(versionType == VersionType.INTERNAL || versionType == VersionType.FORCE)) {
- validationException = addValidationError("version type [" + versionType + "] is not supported by the update API", validationException);
+ if (versionType != VersionType.INTERNAL) {
+ validationException = addValidationError("version type [" + versionType + "] is not supported by the update API",
+ validationException);
} else {
if (version != Versions.MATCH_ANY && retryOnConflict > 0) {
@@ -372,17 +379,80 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
/**
* Explicitly specify the fields that will be returned. By default, nothing is returned.
+ * @deprecated Use {@link UpdateRequest#fetchSource(String[], String[])} instead
*/
+ @Deprecated
public UpdateRequest fields(String... fields) {
this.fields = fields;
return this;
}
/**
+ * Indicate that _source should be returned with the update response, with an
+ * "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param include
+ * An optional include (optionally wildcarded) pattern to filter
+ * the returned _source
+ * @param exclude
+ * An optional exclude (optionally wildcarded) pattern to filter
+ * the returned _source
+ */
+ public UpdateRequest fetchSource(@Nullable String include, @Nullable String exclude) {
+ this.fetchSourceContext = new FetchSourceContext(include, exclude);
+ return this;
+ }
+
+ /**
+ * Indicate that _source should be returned, with an
+ * "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param includes
+ * An optional list of include (optionally wildcarded) patterns to
+ * filter the returned _source
+ * @param excludes
+ * An optional list of exclude (optionally wildcarded) patterns to
+ * filter the returned _source
+ */
+ public UpdateRequest fetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+ this.fetchSourceContext = new FetchSourceContext(includes, excludes);
+ return this;
+ }
+
+ /**
+ * Indicates whether the response should contain the updated _source.
+ */
+ public UpdateRequest fetchSource(boolean fetchSource) {
+ this.fetchSourceContext = new FetchSourceContext(fetchSource);
+ return this;
+ }
+
+ /**
+ * Explicitly set the fetch source context for this request
+ */
+ public UpdateRequest fetchSource(FetchSourceContext context) {
+ this.fetchSourceContext = context;
+ return this;
+ }
+
+
+ /**
* Get the fields to be returned.
+ * @deprecated Use {@link UpdateRequest#fetchSource()} instead
*/
+ @Deprecated
public String[] fields() {
- return this.fields;
+ return fields;
+ }
+
+ /**
+ * Gets the {@link FetchSourceContext} which defines how the _source should
+ * be fetched.
+ */
+ public FetchSourceContext fetchSource() {
+ return fetchSourceContext;
}
/**
@@ -619,16 +689,16 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return upsertRequest;
}
- public UpdateRequest source(XContentBuilder source) throws Exception {
- return source(source.bytes());
+ public UpdateRequest fromXContent(XContentBuilder source) throws Exception {
+ return fromXContent(source.bytes());
}
- public UpdateRequest source(byte[] source) throws Exception {
- return source(source, 0, source.length);
+ public UpdateRequest fromXContent(byte[] source) throws Exception {
+ return fromXContent(source, 0, source.length);
}
- public UpdateRequest source(byte[] source, int offset, int length) throws Exception {
- return source(new BytesArray(source, offset, length));
+ public UpdateRequest fromXContent(byte[] source, int offset, int length) throws Exception {
+ return fromXContent(new BytesArray(source, offset, length));
}
/**
@@ -647,7 +717,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return detectNoop;
}
- public UpdateRequest source(BytesReference source) throws Exception {
+ public UpdateRequest fromXContent(BytesReference source) throws Exception {
Script script = null;
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
XContentParser.Token token = parser.nextToken();
@@ -686,6 +756,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
if (fields != null) {
fields(fields.toArray(new String[fields.size()]));
}
+ } else if ("_source".equals(currentFieldName)) {
+ fetchSourceContext = FetchSourceContext.parse(parser);
}
}
if (script != null) {
@@ -730,13 +802,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
doc = new IndexRequest();
doc.readFrom(in);
}
- int size = in.readInt();
- if (size >= 0) {
- fields = new String[size];
- for (int i = 0; i < size; i++) {
- fields[i] = in.readString();
- }
- }
+ fields = in.readOptionalStringArray();
+ fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
if (in.readBoolean()) {
upsertRequest = new IndexRequest();
upsertRequest.readFrom(in);
@@ -773,14 +840,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
doc.id(id);
doc.writeTo(out);
}
- if (fields == null) {
- out.writeInt(-1);
- } else {
- out.writeInt(fields.length);
- for (String field : fields) {
- out.writeString(field);
- }
- }
+ out.writeOptionalStringArray(fields);
+ out.writeOptionalWriteable(fetchSourceContext);
if (upsertRequest == null) {
out.writeBoolean(false);
} else {
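A usage sketch for the fetchSource API added above; the index, type, and id values are made up. A request built this way carries back a filtered _source in the update response instead of the deprecated per-field values from fields(...):

    import org.elasticsearch.action.update.UpdateRequest;

    final class UpdateFetchSourceSketch {
        static UpdateRequest buildRequest() {
            // ask for title and author.* back, minus author.address
            return new UpdateRequest("library", "book", "1")
                .fetchSource(new String[] {"title", "author.*"}, // includes
                             new String[] {"author.address"});   // excludes
        }
    }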
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
index f2d80bfe66..bbbc9bafd8 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -25,17 +25,22 @@ import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.action.document.RestUpdateAction;
import org.elasticsearch.script.Script;
import java.util.Map;
public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder>
implements WriteRequestBuilder<UpdateRequestBuilder> {
+ private static final DeprecationLogger DEPRECATION_LOGGER =
+ new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));
public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) {
super(client, action, new UpdateRequest());
@@ -90,13 +95,58 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
/**
* Explicitly specify the fields that will be returned. By default, nothing is returned.
+ * @deprecated Use {@link UpdateRequestBuilder#setFetchSource(String[], String[])} instead
*/
+ @Deprecated
public UpdateRequestBuilder setFields(String... fields) {
+ DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
request.fields(fields);
return this;
}
/**
+ * Indicate that _source should be returned with the update response, with an
+ * "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param include
+ * An optional include (optionally wildcarded) pattern to filter
+ * the returned _source
+ * @param exclude
+ * An optional exclude (optionally wildcarded) pattern to filter
+ * the returned _source
+ */
+ public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
+ request.fetchSource(include, exclude);
+ return this;
+ }
+
+ /**
+ * Indicate that _source should be returned, with an
+ * "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param includes
+     *            An optional list of include (optionally wildcarded) patterns to
+ * filter the returned _source
+ * @param excludes
+     *            An optional list of exclude (optionally wildcarded) patterns to
+ * filter the returned _source
+ */
+ public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+ request.fetchSource(includes, excludes);
+ return this;
+ }
+
+ /**
+ * Indicates whether the response should contain the updated _source.
+ */
+ public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
+ request.fetchSource(fetchSource);
+ return this;
+ }
+
+ /**
     * Sets the number of retries to attempt if a version conflict occurs because the document was updated between
* getting it and updating it. Defaults to 0.
*/
@@ -279,26 +329,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
- public UpdateRequestBuilder setSource(XContentBuilder source) throws Exception {
- request.source(source);
- return this;
- }
-
- public UpdateRequestBuilder setSource(byte[] source) throws Exception {
- request.source(source);
- return this;
- }
-
- public UpdateRequestBuilder setSource(byte[] source, int offset, int length) throws Exception {
- request.source(source, offset, length);
- return this;
- }
-
- public UpdateRequestBuilder setSource(BytesReference source) throws Exception {
- request.source(source);
- return this;
- }
-
/**
* Sets whether the specified doc parameter should be used as upsert document.
*/
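
For callers migrating off the deprecated setFields, the new overloads read like this; a hypothetical example (index, type, and field names invented for illustration):

    // Hypothetical client usage of the new _source filtering on updates.
    UpdateResponse response = client.prepareUpdate("index", "type", "1")
            .setDoc("counter", 1)
            .setFetchSource(new String[] { "counter", "tags.*" },   // includes
                            new String[] { "tags.internal" })       // excludes
            .get();
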
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
index 7fa6245d1e..8da3d6b658 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
@@ -19,16 +19,20 @@
package org.elasticsearch.bootstrap;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@@ -38,12 +42,16 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URISyntaxException;
import java.nio.file.Path;
+import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@@ -81,7 +89,7 @@ final class Bootstrap {
/** initialize native resources */
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
- final ESLogger logger = Loggers.getLogger(Bootstrap.class);
+ final Logger logger = Loggers.getLogger(Bootstrap.class);
// check if the user is running as root, and bail
if (Natives.definitelyRunningAsRoot()) {
@@ -142,7 +150,7 @@ final class Bootstrap {
JvmInfo.jvmInfo();
}
- private void setup(boolean addShutdownHook, Environment environment) throws Exception {
+ private void setup(boolean addShutdownHook, Environment environment) throws BootstrapException {
Settings settings = environment.settings();
initializeNatives(
environment.tmpFile(),
@@ -166,15 +174,25 @@ final class Bootstrap {
});
}
- // look for jar hell
- JarHell.checkJarHell();
+ try {
+ // look for jar hell
+ JarHell.checkJarHell();
+ } catch (IOException | URISyntaxException e) {
+ throw new BootstrapException(e);
+ }
// install SM after natives, shutdown hooks, etc.
- Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
+ try {
+ Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
+ } catch (IOException | NoSuchAlgorithmException e) {
+ throw new BootstrapException(e);
+ }
node = new Node(environment) {
@Override
- protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
+ protected void validateNodeBeforeAcceptingRequests(
+ final Settings settings,
+ final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
BootstrapCheck.check(settings, boundTransportAddress);
}
};
@@ -189,7 +207,7 @@ final class Bootstrap {
return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);
}
- private void start() {
+ private void start() throws NodeValidationException {
node.start();
keepAliveThread.start();
}
@@ -210,13 +228,13 @@ final class Bootstrap {
}
/**
- * This method is invoked by {@link Elasticsearch#main(String[])}
- * to startup elasticsearch.
+     * This method is invoked by {@link Elasticsearch#main(String[])} to start up Elasticsearch.
*/
static void init(
final boolean foreground,
final Path pidFile,
- final Map<String, String> esSettings) throws Exception {
+ final boolean quiet,
+ final Map<String, String> esSettings) throws BootstrapException, NodeValidationException, UserException {
// Set the system property before anything has a chance to trigger its use
initLoggerPrefix();
@@ -227,16 +245,29 @@ final class Bootstrap {
INSTANCE = new Bootstrap();
Environment environment = initialEnvironment(foreground, pidFile, esSettings);
- LogConfigurator.configure(environment.settings(), true);
+ try {
+ LogConfigurator.configure(environment);
+ } catch (IOException e) {
+ throw new BootstrapException(e);
+ }
checkForCustomConfFile();
if (environment.pidFile() != null) {
- PidFile.create(environment.pidFile(), true);
+ try {
+ PidFile.create(environment.pidFile(), true);
+ } catch (IOException e) {
+ throw new BootstrapException(e);
+ }
}
+ final boolean closeStandardStreams = (foreground == false) || quiet;
try {
- if (!foreground) {
- Loggers.disableConsoleLogging();
+ if (closeStandardStreams) {
+ final Logger rootLogger = ESLoggerFactory.getRootLogger();
+ final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
+ if (maybeConsoleAppender != null) {
+ Loggers.removeAppender(rootLogger, maybeConsoleAppender);
+ }
closeSystOut();
}
@@ -256,15 +287,17 @@ final class Bootstrap {
INSTANCE.start();
- if (!foreground) {
+ if (closeStandardStreams) {
closeSysError();
}
- } catch (Exception e) {
+ } catch (NodeValidationException | RuntimeException e) {
            // disable console logging, so the user does not see the exception twice (the JVM will already show it)
- if (foreground) {
- Loggers.disableConsoleLogging();
+ final Logger rootLogger = ESLoggerFactory.getRootLogger();
+ final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
+ if (foreground && maybeConsoleAppender != null) {
+ Loggers.removeAppender(rootLogger, maybeConsoleAppender);
}
- ESLogger logger = Loggers.getLogger(Bootstrap.class);
+ Logger logger = Loggers.getLogger(Bootstrap.class);
if (INSTANCE.node != null) {
logger = Loggers.getLogger(Bootstrap.class, Node.NODE_NAME_SETTING.get(INSTANCE.node.settings()));
}
@@ -272,17 +305,30 @@ final class Bootstrap {
if (e instanceof CreationException) {
// guice: log the shortened exc to the log file
ByteArrayOutputStream os = new ByteArrayOutputStream();
- PrintStream ps = new PrintStream(os, false, "UTF-8");
- new StartupError(e).printStackTrace(ps);
+ PrintStream ps = null;
+ try {
+ ps = new PrintStream(os, false, "UTF-8");
+ } catch (UnsupportedEncodingException uee) {
+ assert false;
+ e.addSuppressed(uee);
+ }
+ new StartupException(e).printStackTrace(ps);
ps.flush();
- logger.error("Guice Exception: {}", os.toString("UTF-8"));
+ try {
+ logger.error("Guice Exception: {}", os.toString("UTF-8"));
+ } catch (UnsupportedEncodingException uee) {
+ assert false;
+ e.addSuppressed(uee);
+ }
+ } else if (e instanceof NodeValidationException) {
+ logger.error("node validation exception\n{}", e.getMessage());
} else {
// full exception
logger.error("Exception", e);
}
            // re-enable it if appropriate, so the user can see any logging during the shutdown process
- if (foreground) {
- Loggers.enableConsoleLogging();
+ if (foreground && maybeConsoleAppender != null) {
+ Loggers.addAppender(rootLogger, maybeConsoleAppender);
}
throw e;
@@ -310,7 +356,7 @@ final class Bootstrap {
private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
if (confFileSetting != null && confFileSetting.isEmpty() == false) {
- ESLogger logger = Loggers.getLogger(Bootstrap.class);
+ Logger logger = Loggers.getLogger(Bootstrap.class);
logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
exit(1);
}
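
The quiet/daemonize handling above boils down to a detach-and-restore dance around the console appender. A condensed sketch of that flow, using the Loggers and ESLoggerFactory helpers visible in the hunks:

    // Detach the console appender while running silently; on failure,
    // re-attach it so the user still sees shutdown logging.
    final Logger rootLogger = ESLoggerFactory.getRootLogger();
    final Appender console = Loggers.findAppender(rootLogger, ConsoleAppender.class);
    if (console != null) {
        Loggers.removeAppender(rootLogger, console);
    }
    try {
        INSTANCE.start();
    } catch (NodeValidationException | RuntimeException e) {
        if (console != null) {
            Loggers.addAppender(rootLogger, console);
        }
        throw e;
    }
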
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
index b32751d757..de80b487c7 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
@@ -19,18 +19,20 @@
package org.elasticsearch.bootstrap;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
import java.io.BufferedReader;
import java.io.IOException;
@@ -61,10 +63,9 @@ final class BootstrapCheck {
* @param settings the current node settings
* @param boundTransportAddress the node network bindings
*/
- static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) {
+ static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
check(
enforceLimits(boundTransportAddress),
- BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS.get(settings),
checks(settings),
Node.NODE_NAME_SETTING.get(settings));
}
@@ -75,14 +76,15 @@ final class BootstrapCheck {
*
     * @param enforceLimits true if the checks should be enforced, or false if
     *                      failures should only be logged as warnings
- * @param ignoreSystemChecks true if system checks should be enforced
- * or otherwise warned
* @param checks the checks to execute
* @param nodeName the node name to be used as a logging prefix
*/
// visible for testing
- static void check(final boolean enforceLimits, final boolean ignoreSystemChecks, final List<Check> checks, final String nodeName) {
- check(enforceLimits, ignoreSystemChecks, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
+ static void check(
+ final boolean enforceLimits,
+ final List<Check> checks,
+ final String nodeName) throws NodeValidationException {
+ check(enforceLimits, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
}
/**
@@ -91,29 +93,23 @@ final class BootstrapCheck {
*
     * @param enforceLimits true if the checks should be enforced, or false if
     *                      failures should only be logged as warnings
- * @param ignoreSystemChecks true if system checks should be enforced
- * or otherwise warned
* @param checks the checks to execute
     * @param logger the logger to log any check warnings to
*/
static void check(
final boolean enforceLimits,
- final boolean ignoreSystemChecks,
final List<Check> checks,
- final ESLogger logger) {
+ final Logger logger) throws NodeValidationException {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
}
- if (enforceLimits && ignoreSystemChecks) {
- logger.warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
- }
for (final Check check : checks) {
if (check.check()) {
- if ((!enforceLimits || (check.isSystemCheck() && ignoreSystemChecks)) && !check.alwaysEnforce()) {
+ if (!enforceLimits && !check.alwaysEnforce()) {
ignoredErrors.add(check.errorMessage());
} else {
errors.add(check.errorMessage());
@@ -129,14 +125,14 @@ final class BootstrapCheck {
final List<String> messages = new ArrayList<>(1 + errors.size());
messages.add("bootstrap checks failed");
messages.addAll(errors);
- final RuntimeException re = new RuntimeException(String.join("\n", messages));
- errors.stream().map(IllegalStateException::new).forEach(re::addSuppressed);
- throw re;
+ final NodeValidationException ne = new NodeValidationException(String.join("\n", messages));
+ errors.stream().map(IllegalStateException::new).forEach(ne::addSuppressed);
+ throw ne;
}
}
- static void log(final ESLogger logger, final String error) {
+ static void log(final Logger logger, final String error) {
logger.warn(error);
}
@@ -166,11 +162,11 @@ final class BootstrapCheck {
if (Constants.LINUX || Constants.MAC_OS_X) {
checks.add(new MaxSizeVirtualMemoryCheck());
}
- checks.add(new MinMasterNodesCheck(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(settings)));
if (Constants.LINUX) {
checks.add(new MaxMapCountCheck());
}
checks.add(new ClientJvmCheck());
+ checks.add(new UseSerialGCCheck());
checks.add(new OnErrorCheck());
checks.add(new OnOutOfMemoryErrorCheck());
return Collections.unmodifiableList(checks);
@@ -195,14 +191,6 @@ final class BootstrapCheck {
*/
String errorMessage();
- /**
- * test if the check is a system-level check
- *
- * @return true if the check is a system-level check as opposed
- * to an Elasticsearch-level check
- */
- boolean isSystemCheck();
-
default boolean alwaysEnforce() {
return false;
}
@@ -239,11 +227,6 @@ final class BootstrapCheck {
return JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
}
- @Override
- public final boolean isSystemCheck() {
- return false;
- }
-
}
static class OsXFileDescriptorCheck extends FileDescriptorCheck {
@@ -293,11 +276,6 @@ final class BootstrapCheck {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
}
- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}
static class MlockallCheck implements Check {
@@ -323,37 +301,6 @@ final class BootstrapCheck {
return Natives.isMemoryLocked();
}
- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
- }
-
- static class MinMasterNodesCheck implements Check {
-
- final boolean minMasterNodesIsSet;
-
- MinMasterNodesCheck(boolean minMasterNodesIsSet) {
- this.minMasterNodesIsSet = minMasterNodesIsSet;
- }
-
- @Override
- public boolean check() {
- return minMasterNodesIsSet == false;
- }
-
- @Override
- public String errorMessage() {
- return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
- "] to a majority of the number of master eligible nodes in your cluster";
- }
-
- @Override
- public final boolean isSystemCheck() {
- return false;
- }
-
}
static class MaxNumberOfThreadsCheck implements Check {
@@ -380,11 +327,6 @@ final class BootstrapCheck {
return JNANatives.MAX_NUMBER_OF_THREADS;
}
- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}
static class MaxSizeVirtualMemoryCheck implements Check {
@@ -413,11 +355,6 @@ final class BootstrapCheck {
return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
}
- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}
static class MaxMapCountCheck implements Check {
@@ -444,7 +381,7 @@ final class BootstrapCheck {
}
// visible for testing
- long getMaxMapCount(ESLogger logger) {
+ long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
@@ -452,11 +389,15 @@ final class BootstrapCheck {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
- logger.warn("unable to parse vm.max_map_count [{}]", e, rawProcSysVmMaxMapCount);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "unable to parse vm.max_map_count [{}]",
+ rawProcSysVmMaxMapCount),
+ e);
}
}
} catch (final IOException e) {
- logger.warn("I/O exception while trying to read [{}]", e, path);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}
@@ -481,11 +422,6 @@ final class BootstrapCheck {
return Long.parseLong(procSysVmMaxMapCount);
}
- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}
static class ClientJvmCheck implements BootstrapCheck.Check {
@@ -508,9 +444,31 @@ final class BootstrapCheck {
getVmName());
}
+ }
+
+ /**
+     * Checks if the serial collector is in use. This collector is single-threaded and
+     * devastating for performance; it should not be used for a server application like Elasticsearch.
+ */
+ static class UseSerialGCCheck implements BootstrapCheck.Check {
+
+ @Override
+ public boolean check() {
+ return getUseSerialGC().equals("true");
+ }
+
+ // visible for testing
+ String getUseSerialGC() {
+ return JvmInfo.jvmInfo().useSerialGC();
+ }
+
@Override
- public final boolean isSystemCheck() {
- return false;
+ public String errorMessage() {
+ return String.format(
+ Locale.ROOT,
+                "JVM is using the serial collector, which is devastating for performance; " +
+ "either it's the default for the VM [%s] or -XX:+UseSerialGC was explicitly specified",
+ JvmInfo.jvmInfo().getVmName());
}
}
@@ -531,11 +489,6 @@ final class BootstrapCheck {
abstract boolean mightFork();
@Override
- public final boolean isSystemCheck() {
- return false;
- }
-
- @Override
public final boolean alwaysEnforce() {
return true;
}
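
With isSystemCheck gone, the Check contract reduces to check() and errorMessage(), plus the alwaysEnforce() default. A hypothetical additional check written against the slimmed-down interface:

    // Hypothetical check; like the existing ones, check() returns true
    // when the check *fails*.
    static class ExampleHeapConfiguredCheck implements BootstrapCheck.Check {

        @Override
        public boolean check() {
            return JvmInfo.jvmInfo().getConfiguredMaxHeapSize() <= 0;
        }

        @Override
        public String errorMessage() {
            return "max heap size is not configured";
        }

    }
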
diff --git a/core/src/main/java/org/apache/log4j/Java9Hack.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapException.java
index 831cf5b35a..540a732dfa 100644
--- a/core/src/main/java/org/apache/log4j/Java9Hack.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapException.java
@@ -17,21 +17,27 @@
* under the License.
*/
-package org.apache.log4j;
+package org.elasticsearch.bootstrap;
-import org.apache.log4j.helpers.ThreadLocalMap;
+import java.nio.file.Path;
+import java.util.Map;
/**
- * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
- *
- * This hack fixes up the pkg private members as if it had detected the java version correctly.
+ * Wrapper exception for checked exceptions thrown during the bootstrap process. Methods invoked
+ * during bootstrap should explicitly declare the checked exceptions that they can throw, rather
+ * than declaring the top-level checked exception {@link Exception}. This exception exists to wrap
+ * these checked exceptions so that {@link Bootstrap#init(boolean, Path, boolean, Map)} does not have to
+ * declare all of these checked exceptions.
*/
-public class Java9Hack {
+class BootstrapException extends Exception {
- public static void fixLog4j() {
- if (MDC.mdc.tlm == null) {
- MDC.mdc.java1 = false;
- MDC.mdc.tlm = new ThreadLocalMap();
- }
+ /**
+ * Wraps an existing exception.
+ *
+ * @param cause the underlying cause of bootstrap failing
+ */
+ BootstrapException(final Exception cause) {
+ super(cause);
}
+
}
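
In use (as the Bootstrap.setup hunks above show), each bootstrap step declares its concrete checked exceptions and the caller funnels them into the wrapper:

    // Concrete checked exceptions are caught per step and rethrown wrapped,
    // so init() only declares BootstrapException.
    try {
        JarHell.checkJarHell();   // throws IOException, URISyntaxException
    } catch (IOException | URISyntaxException e) {
        throw new BootstrapException(e);
    }
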
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java
index ad37916881..e8015d83af 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java
@@ -37,7 +37,5 @@ public final class BootstrapSettings {
Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
public static final Setting<Boolean> CTRLHANDLER_SETTING =
Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
- public static final Setting<Boolean> IGNORE_SYSTEM_BOOTSTRAP_CHECKS =
- Setting.boolSetting("bootstrap.ignore_system_bootstrap_checks", false, Property.NodeScope);
}
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
index 9c76fdfb03..b4ec024b9e 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
@@ -23,13 +23,13 @@ import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.OptionSpecBuilder;
import joptsimple.util.PathConverter;
-import joptsimple.util.PathProperties;
import org.elasticsearch.Build;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.node.NodeValidationException;
import java.io.IOException;
import java.nio.file.Path;
@@ -44,6 +44,7 @@ class Elasticsearch extends SettingCommand {
private final OptionSpecBuilder versionOption;
private final OptionSpecBuilder daemonizeOption;
private final OptionSpec<Path> pidfileOption;
+ private final OptionSpecBuilder quietOption;
// visible for testing
Elasticsearch() {
@@ -58,6 +59,10 @@ class Elasticsearch extends SettingCommand {
.availableUnless(versionOption)
.withRequiredArg()
.withValuesConvertedBy(new PathConverter());
+ quietOption = parser.acceptsAll(Arrays.asList("q", "quiet"),
+            "Turns off standard output/error streams logging in the console")
+ .availableUnless(versionOption)
+ .availableUnless(daemonizeOption);
}
/**
@@ -76,7 +81,7 @@ class Elasticsearch extends SettingCommand {
}
@Override
- protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
+ protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws UserException {
if (options.nonOptionArguments().isEmpty() == false) {
throw new UserException(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
}
@@ -92,17 +97,23 @@ class Elasticsearch extends SettingCommand {
final boolean daemonize = options.has(daemonizeOption);
final Path pidFile = pidfileOption.value(options);
+ final boolean quiet = options.has(quietOption);
- init(daemonize, pidFile, settings);
+ try {
+ init(daemonize, pidFile, quiet, settings);
+ } catch (NodeValidationException e) {
+ throw new UserException(ExitCodes.CONFIG, e.getMessage());
+ }
}
- void init(final boolean daemonize, final Path pidFile, final Map<String, String> esSettings) {
+ void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map<String, String> esSettings)
+ throws NodeValidationException, UserException {
try {
- Bootstrap.init(!daemonize, pidFile, esSettings);
- } catch (final Throwable t) {
+ Bootstrap.init(!daemonize, pidFile, quiet, esSettings);
+ } catch (BootstrapException | RuntimeException e) {
// format exceptions to the console in a special way
// to avoid 2MB stacktraces from guice, etc.
- throw new StartupError(t);
+ throw new StartupException(e);
}
}
@@ -112,9 +123,11 @@ class Elasticsearch extends SettingCommand {
*
* http://commons.apache.org/proper/commons-daemon/procrun.html
*
- * NOTE: If this method is renamed and/or moved, make sure to update service.bat!
+ * NOTE: If this method is renamed and/or moved, make sure to
+ * update elasticsearch-service.bat!
*/
static void close(String[] args) throws IOException {
Bootstrap.stop();
}
+
}
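
The new flag leans on jopt-simple's exclusivity rules: --quiet is rejected at parse time when --version or --daemonize is present. A self-contained sketch (class name and parse input are illustrative):

    import java.util.Arrays;
    import joptsimple.OptionParser;
    import joptsimple.OptionSet;
    import joptsimple.OptionSpecBuilder;

    class QuietOptionDemo {
        public static void main(String[] args) {
            OptionParser parser = new OptionParser();
            OptionSpecBuilder daemonize = parser.acceptsAll(Arrays.asList("d", "daemonize"), "daemonize");
            OptionSpecBuilder quiet = parser.acceptsAll(Arrays.asList("q", "quiet"), "quiet console logging")
                    .availableUnless(daemonize);

            OptionSet options = parser.parse("-q");
            System.out.println(options.has(quiet));    // true
            // parser.parse("-d", "-q") would throw an OptionException,
            // because -q is declared unavailable when -d is present.
        }
    }
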
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
index 45d54ed4a6..b1df4f5ccc 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
@@ -19,9 +19,10 @@
package org.elasticsearch.bootstrap;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOError;
@@ -76,14 +77,17 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
// visible for testing
void onFatalUncaught(final String threadName, final Throwable t) {
- final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
- logger.error("fatal error in thread [{}], exiting", t, threadName);
+ final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
+ logger.error(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
}
// visible for testing
void onNonFatalUncaught(final String threadName, final Throwable t) {
- final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
- logger.warn("uncaught exception in thread [{}]", t, threadName);
+ final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
+ logger.warn((org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
}
// visible for testing
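
This file shows the mechanical core of the ESLogger-to-Log4j 2 migration that runs through this merge: the old API accepted the throwable before the format arguments, while Log4j 2 takes it last, so parameterized messages move into a lazily evaluated Supplier. The idiom in isolation:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LazyLoggingIdiom {
        // before: logger.warn("uncaught exception in thread [{}]", t, threadName);
        static void warn(Logger logger, String threadName, Throwable t) {
            // the message is only built if WARN is enabled; the throwable goes last
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
        }
    }
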
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java
index 5d1369b21f..fe0f400698 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java
@@ -22,8 +22,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.NativeLong;
import com.sun.jna.Structure;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.Arrays;
@@ -34,7 +34,7 @@ import java.util.List;
*/
final class JNACLibrary {
- private static final ESLogger logger = Loggers.getLogger(JNACLibrary.class);
+ private static final Logger logger = Loggers.getLogger(JNACLibrary.class);
public static final int MCL_CURRENT = 1;
public static final int ENOMEM = 12;
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java
index 50dab6888b..747ba2e458 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java
@@ -25,8 +25,8 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.win32.StdCallLibrary;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.ArrayList;
@@ -40,7 +40,7 @@ import java.util.List;
*/
final class JNAKernel32Library {
- private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
+ private static final Logger logger = Loggers.getLogger(JNAKernel32Library.class);
    // Callbacks must be kept around so that they can be invoked later,
    // when the Windows ConsoleCtrlHandler sends an event.
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java
index 5a8693b313..5f3e357ff5 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java
@@ -21,8 +21,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.monitor.jvm.JvmInfo;
@@ -39,7 +39,7 @@ class JNANatives {
/** no instantiation */
private JNANatives() {}
- private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
+ private static final Logger logger = Loggers.getLogger(JNANatives.class);
// Set to true, in case native mlockall call was successful
static boolean LOCAL_MLOCKALL = false;
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java
index 3f77f6bcee..ebf9ab5f55 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java
@@ -19,14 +19,15 @@
package org.elasticsearch.bootstrap;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
import java.net.MalformedURLException;
+import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.FileVisitResult;
@@ -74,9 +75,9 @@ public class JarHell {
* Checks the current classpath for duplicate classes
* @throws IllegalStateException if jar hell was found
*/
- public static void checkJarHell() throws Exception {
+ public static void checkJarHell() throws IOException, URISyntaxException {
ClassLoader loader = JarHell.class.getClassLoader();
- ESLogger logger = Loggers.getLogger(JarHell.class);
+ Logger logger = Loggers.getLogger(JarHell.class);
if (logger.isDebugEnabled()) {
logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
@@ -86,7 +87,7 @@ public class JarHell {
}
checkJarHell(parseClassPath());
}
-
+
/**
* Parses the classpath into an array of URLs
* @return array of URLs
@@ -149,8 +150,8 @@ public class JarHell {
* @throws IllegalStateException if jar hell was found
*/
@SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
- public static void checkJarHell(URL urls[]) throws Exception {
- ESLogger logger = Loggers.getLogger(JarHell.class);
+ public static void checkJarHell(URL urls[]) throws URISyntaxException, IOException {
+ Logger logger = Loggers.getLogger(JarHell.class);
// we don't try to be sneaky and use deprecated/internal/not portable stuff
// like sun.boot.class.path, and with jigsaw we don't yet have a way to get
// a "list" at all. So just exclude any elements underneath the java home
@@ -168,7 +169,7 @@ public class JarHell {
if (path.toString().endsWith(".jar")) {
if (!seenJars.add(path)) {
logger.debug("excluding duplicate classpath element: {}", path);
- continue; // we can't fail because of sheistiness with joda-time
+ continue;
}
logger.debug("examining jar: {}", path);
try (JarFile file = new JarFile(path.toString())) {
@@ -271,11 +272,19 @@ public class JarHell {
"class: " + clazz + System.lineSeparator() +
"exists multiple times in jar: " + jarpath + " !!!!!!!!!");
} else {
- if (clazz.startsWith("org.apache.log4j")) {
- return; // go figure, jar hell for what should be System.out.println...
- }
- if (clazz.equals("org.joda.time.base.BaseDateTime")) {
- return; // apparently this is intentional... clean this up
+ if (clazz.startsWith("org.apache.logging.log4j.core.impl.ThrowableProxy")) {
+ /*
+                     * deliberate hack around a bug in Log4j
+ * cf. https://github.com/elastic/elasticsearch/issues/20304
+ * cf. https://issues.apache.org/jira/browse/LOG4J2-1560
+ */
+ return;
+ } else if (clazz.startsWith("org.apache.logging.log4j.core.jmx.Server")) {
+ /*
+                     * deliberate hack around a bug in Log4j
+ * cf. https://issues.apache.org/jira/browse/LOG4J2-1506
+ */
+ return;
}
throw new IllegalStateException("jar hell!" + System.lineSeparator() +
"class: " + clazz + System.lineSeparator() +
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java
index 6dba1f3a1b..9fad34e329 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java
@@ -19,7 +19,7 @@
package org.elasticsearch.bootstrap;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import java.nio.file.Path;
@@ -32,7 +32,7 @@ final class Natives {
/** no instantiation */
private Natives() {}
- private static final ESLogger logger = Loggers.getLogger(Natives.class);
+ private static final Logger logger = Loggers.getLogger(Natives.class);
// marker to determine if the JNA class files are available to the JVM
static final boolean JNA_AVAILABLE;
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java
index 6f6c3dc557..88c618d445 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java
@@ -26,9 +26,9 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.ptr.PointerByReference;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@@ -92,7 +92,7 @@ import java.util.Map;
*/
// not an example of how to write code!!!
final class Seccomp {
- private static final ESLogger logger = Loggers.getLogger(Seccomp.class);
+ private static final Logger logger = Loggers.getLogger(Seccomp.class);
// Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java
index 909d3dc153..e45e42757c 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java
@@ -114,13 +114,13 @@ final class Security {
* @param environment configuration for generating dynamic permissions
* @param filterBadDefaults true if we should filter out bad java defaults in the system policy.
*/
- static void configure(Environment environment, boolean filterBadDefaults) throws Exception {
+ static void configure(Environment environment, boolean filterBadDefaults) throws IOException, NoSuchAlgorithmException {
// enable security policy: union of template and environment-based paths, and possibly plugin permissions
Policy.setPolicy(new ESPolicy(createPermissions(environment), getPluginPermissions(environment), filterBadDefaults));
// enable security manager
- System.setSecurityManager(new SecureSM(new String[] { "org.elasticsearch.bootstrap." }));
+ System.setSecurityManager(new SecureSM(new String[] { "org.elasticsearch.bootstrap.", "org.elasticsearch.cli" }));
// do some basic tests
selfTest();
@@ -257,11 +257,6 @@ final class Security {
for (Path path : environment.dataFiles()) {
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
- // TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder
- assert Version.CURRENT.major < 6 : "cluster name is no longer used in data path";
- for (Path path : environment.dataWithClusterFiles()) {
- addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
- }
for (Path path : environment.repoFiles()) {
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
}
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/StartupError.java b/core/src/main/java/org/elasticsearch/bootstrap/StartupException.java
index 781aac31f6..a78f82ef3e 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/StartupError.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/StartupException.java
@@ -23,6 +23,8 @@ import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;
import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.util.function.Consumer;
/**
 * Wraps an exception in a special way so that it gets formatted
@@ -32,18 +34,18 @@ import java.io.PrintStream;
*/
//TODO: remove this when guice is removed, and exceptions are cleaned up
//this is horrible, but its what we must do
-final class StartupError extends RuntimeException {
-
+final class StartupException extends RuntimeException {
+
/** maximum length of a stacktrace, before we truncate it */
static final int STACKTRACE_LIMIT = 30;
/** all lines from this package are RLE-compressed */
static final String GUICE_PACKAGE = "org.elasticsearch.common.inject";
-
- /**
- * Create a new StartupError that will format {@code cause}
+
+ /**
+ * Create a new StartupException that will format {@code cause}
* to the console on failure.
*/
- StartupError(Throwable cause) {
+ StartupException(Throwable cause) {
super(cause);
}
@@ -53,15 +55,24 @@ final class StartupError extends RuntimeException {
*/
@Override
public void printStackTrace(PrintStream s) {
+ printStackTrace(s::println);
+ }
+
+ @Override
+ public void printStackTrace(PrintWriter s) {
+ printStackTrace(s::println);
+ }
+
+ private void printStackTrace(Consumer<String> consumer) {
Throwable originalCause = getCause();
Throwable cause = originalCause;
if (cause instanceof CreationException) {
cause = getFirstGuiceCause((CreationException)cause);
}
-
+
String message = cause.toString();
- s.println(message);
-
+ consumer.accept(message);
+
if (cause != null) {
// walk to the root cause
while (cause.getCause() != null) {
@@ -70,7 +81,7 @@ final class StartupError extends RuntimeException {
// print the root cause message, only if it differs!
if (cause != originalCause && (message.equals(cause.toString()) == false)) {
- s.println("Likely root cause: " + cause);
+ consumer.accept("Likely root cause: " + cause);
}
// print stacktrace of cause
@@ -78,33 +89,33 @@ final class StartupError extends RuntimeException {
int linesWritten = 0;
for (int i = 0; i < stack.length; i++) {
if (linesWritten == STACKTRACE_LIMIT) {
- s.println("\t<<<truncated>>>");
+ consumer.accept("\t<<<truncated>>>");
break;
}
String line = stack[i].toString();
-
+
// skip past contiguous runs of this garbage:
if (line.startsWith(GUICE_PACKAGE)) {
while (i + 1 < stack.length && stack[i + 1].toString().startsWith(GUICE_PACKAGE)) {
i++;
}
- s.println("\tat <<<guice>>>");
+ consumer.accept("\tat <<<guice>>>");
linesWritten++;
continue;
}
- s.println("\tat " + line.toString());
+ consumer.accept("\tat " + line.toString());
linesWritten++;
}
}
        // if it's a guice exception, the whole thing really will not be in the log; it's megabytes.
// refer to the hack in bootstrap, where we don't log it
if (originalCause instanceof CreationException == false) {
- s.println("Refer to the log for complete error details.");
+ consumer.accept("Refer to the log for complete error details.");
}
}
-
- /**
+
+ /**
* Returns first cause from a guice error (it can have multiple).
*/
static Throwable getFirstGuiceCause(CreationException guice) {
@@ -116,4 +127,5 @@ final class StartupError extends RuntimeException {
}
return guice; // we tried
}
+
}
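
The rename also unifies the PrintStream and PrintWriter overloads: both now delegate to a single Consumer<String>-based implementation via method references. The shape, standalone:

    import java.io.PrintStream;
    import java.io.PrintWriter;
    import java.util.function.Consumer;

    class DualPrint {
        void printStackTrace(PrintStream s) {
            printStackTrace(s::println);   // binds PrintStream.println(String)
        }

        void printStackTrace(PrintWriter w) {
            printStackTrace(w::println);   // binds PrintWriter.println(String)
        }

        private void printStackTrace(Consumer<String> consumer) {
            consumer.accept("first line");
            consumer.accept("second line");
        }
    }
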
diff --git a/core/src/main/java/org/elasticsearch/cli/Command.java b/core/src/main/java/org/elasticsearch/cli/Command.java
index 2e896759eb..cd7a02e696 100644
--- a/core/src/main/java/org/elasticsearch/cli/Command.java
+++ b/core/src/main/java/org/elasticsearch/cli/Command.java
@@ -23,7 +23,10 @@ import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
+import org.apache.logging.log4j.Level;
import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.logging.LogConfigurator;
+import org.elasticsearch.common.settings.Settings;
import java.io.IOException;
import java.util.Arrays;
@@ -50,6 +53,11 @@ public abstract class Command {
/** Parses options for this command from args and executes it. */
public final int main(String[] args, Terminal terminal) throws Exception {
+        // initialize a default for es.logger.level because we will not read the log4j2.properties file
+ final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
+ final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
+ LogConfigurator.configureWithoutConfig(settings);
+
try {
mainWithoutErrorHandling(args, terminal);
} catch (OptionException e) {
diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
index f7ce9f929b..9f0675f308 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
+++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
@@ -45,6 +45,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.ActionPlugin;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.SearchPlugin;
@@ -52,6 +53,7 @@ import org.elasticsearch.search.SearchModule;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpTransport;
+import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
@@ -119,10 +121,9 @@ public abstract class TransportClient extends AbstractClient {
}
SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);
- NetworkModule networkModule = new NetworkModule(networkService, settings, true);
SearchModule searchModule = new SearchModule(settings, true, pluginsService.filterPlugins(SearchPlugin.class));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
- entries.addAll(networkModule.getNamedWriteables());
+ entries.addAll(NetworkModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
entries.addAll(pluginsService.filterPlugins(Plugin.class).stream()
.flatMap(p -> p.getNamedWriteables().stream())
@@ -134,9 +135,7 @@ public abstract class TransportClient extends AbstractClient {
for (Module pluginModule : pluginsService.createGuiceModules()) {
modules.add(pluginModule);
}
- modules.add(networkModule);
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
- modules.add(searchModule);
ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class));
modules.add(actionModule);
@@ -148,15 +147,22 @@ public abstract class TransportClient extends AbstractClient {
BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
+ NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool,
+ bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
+ final Transport transport = networkModule.getTransportSupplier().get();
+ final TransportService transportService = new TransportService(settings, transport, threadPool,
+ networkModule.getTransportInterceptor());
modules.add((b -> {
b.bind(BigArrays.class).toInstance(bigArrays);
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
+ b.bind(Transport.class).toInstance(transport);
+ b.bind(TransportService.class).toInstance(transportService);
+ b.bind(NetworkService.class).toInstance(networkService);
}));
Injector injector = modules.createInjector();
- final TransportService transportService = injector.getInstance(TransportService.class);
final TransportClientNodesService nodesService =
new TransportClientNodesService(settings, transportService, threadPool);
final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
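
The TransportClient change inverts construction: the transport stack is now built eagerly from the NetworkModule and handed to Guice as finished instances, instead of being instantiated by the injector. In miniature, the binding pattern from the hunk:

    // Pre-built objects are registered with toInstance bindings, so the
    // injector hands them out without constructing anything itself.
    modules.add(b -> {
        b.bind(Transport.class).toInstance(transport);
        b.bind(TransportService.class).toInstance(transportService);
        b.bind(NetworkService.class).toInstance(networkService);
    });
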
diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
index 7bc0f54648..18c2d15ec3 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
+++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
@@ -20,7 +20,8 @@
package org.elasticsearch.client.transport;
import com.carrotsearch.hppc.cursors.ObjectCursor;
-
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@@ -32,9 +33,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -43,11 +43,11 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
@@ -340,7 +340,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNode(node);
} catch (Exception e) {
it.remove();
- logger.debug("failed to connect to discovered node [{}]", e, node);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
}
}
}
@@ -377,7 +377,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
logger.trace("connecting to listed node (light) [{}]", listedNode);
transportService.connectToNodeLight(listedNode);
} catch (Exception e) {
- logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
+ logger.debug(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
newFilteredNodes.add(listedNode);
continue;
}
@@ -409,7 +411,8 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
newNodes.add(listedNode);
}
} catch (Exception e) {
- logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
+ logger.info(
+ (Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
}
}
@@ -453,7 +456,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNodeLight(listedNode);
}
} catch (Exception e) {
- logger.debug("failed to connect to node [{}], ignoring...", e, listedNode);
+ logger.debug(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
latch.countDown();
return;
}
@@ -482,13 +487,17 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
@Override
public void handleException(TransportException e) {
- logger.info("failed to get local cluster state for {}, disconnecting...", e, listedNode);
+ logger.info(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to get local cluster state for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}
});
} catch (Exception e) {
- logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode);
+ logger.info(
+ (Supplier<?>)() -> new ParameterizedMessage(
+ "failed to get local cluster state info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
index 581844e28f..4e582cb32c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
@@ -44,104 +45,123 @@ import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceA
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
-import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.tasks.TaskResultsService;
-import java.util.Arrays;
-import java.util.Collections;
+import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.Objects;
import java.util.function.Function;
+import java.util.function.Supplier;
/**
* Configures classes and services that affect the entire cluster.
*/
public class ClusterModule extends AbstractModule {
- public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard";
public static final String BALANCED_ALLOCATOR = "balanced"; // default
public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING =
new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
- public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
- Collections.unmodifiableList(Arrays.asList(
- MaxRetryAllocationDecider.class,
- SameShardAllocationDecider.class,
- FilterAllocationDecider.class,
- ReplicaAfterPrimaryActiveAllocationDecider.class,
- ThrottlingAllocationDecider.class,
- RebalanceOnlyWhenActiveAllocationDecider.class,
- ClusterRebalanceAllocationDecider.class,
- ConcurrentRebalanceAllocationDecider.class,
- EnableAllocationDecider.class,
- AwarenessAllocationDecider.class,
- ShardsLimitAllocationDecider.class,
- NodeVersionAllocationDecider.class,
- DiskThresholdDecider.class,
- SnapshotInProgressAllocationDecider.class));
private final Settings settings;
- private final ExtensionPoint.SelectedType<ShardsAllocator> shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class);
- private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
- private final ExtensionPoint.ClassSet<IndexTemplateFilter> indexTemplateFilters = new ExtensionPoint.ClassSet<>("index_template_filter", IndexTemplateFilter.class);
private final ClusterService clusterService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
+ // pkg private for tests
+ final Collection<AllocationDecider> allocationDeciders;
+ final ShardsAllocator shardsAllocator;
// pkg private so tests can mock
Class<? extends ClusterInfoService> clusterInfoServiceImpl = InternalClusterInfoService.class;
- public ClusterModule(Settings settings, ClusterService clusterService) {
+ public ClusterModule(Settings settings, ClusterService clusterService, List<ClusterPlugin> clusterPlugins) {
this.settings = settings;
- for (Class<? extends AllocationDecider> decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
- registerAllocationDecider(decider);
- }
- registerShardsAllocator(ClusterModule.BALANCED_ALLOCATOR, BalancedShardsAllocator.class);
- registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
+ this.allocationDeciders = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins);
+ this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins);
this.clusterService = clusterService;
indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
}
- public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) {
- allocationDeciders.registerExtension(allocationDecider);
+ public IndexNameExpressionResolver getIndexNameExpressionResolver() {
+ return indexNameExpressionResolver;
}
- public void registerShardsAllocator(String name, Class<? extends ShardsAllocator> clazz) {
- shardsAllocators.registerExtension(name, clazz);
+ // TODO: this is public so allocation benchmark can access the default deciders...can we do that in another way?
+ /** Return a collection of {@link AllocationDecider} instances, combining the builtin deciders with those provided by plugins. */
+ public static Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings,
+ List<ClusterPlugin> clusterPlugins) {
+ // collect deciders by class so that we can detect duplicates
+ Map<Class<?>, AllocationDecider> deciders = new HashMap<>();
+ addAllocationDecider(deciders, new MaxRetryAllocationDecider(settings));
+ addAllocationDecider(deciders, new SameShardAllocationDecider(settings));
+ addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider(settings));
+ addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider(settings));
+ addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings));
+ addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings));
+ addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings, clusterSettings));
+
+ clusterPlugins.stream()
+ .flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream())
+ .forEach(d -> addAllocationDecider(deciders, d));
+
+ return deciders.values();
}
- public void registerIndexTemplateFilter(Class<? extends IndexTemplateFilter> indexTemplateFilter) {
- indexTemplateFilters.registerExtension(indexTemplateFilter);
+ /** Add the given allocation decider to the given deciders map, throwing an exception if a decider of the same class has already been added. */
+ private static void addAllocationDecider(Map<Class<?>, AllocationDecider> deciders, AllocationDecider decider) {
+ if (deciders.put(decider.getClass(), decider) != null) {
+ throw new IllegalArgumentException("Cannot specify allocation decider [" + decider.getClass().getName() + "] twice");
+ }
}
- public IndexNameExpressionResolver getIndexNameExpressionResolver() {
- return indexNameExpressionResolver;
+ private static ShardsAllocator createShardsAllocator(Settings settings, ClusterSettings clusterSettings,
+ List<ClusterPlugin> clusterPlugins) {
+ Map<String, Supplier<ShardsAllocator>> allocators = new HashMap<>();
+ allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(settings, clusterSettings));
+
+ for (ClusterPlugin plugin : clusterPlugins) {
+ plugin.getShardsAllocators(settings, clusterSettings).forEach((k, v) -> {
+ if (allocators.put(k, v) != null) {
+ throw new IllegalArgumentException("ShardsAllocator [" + k + "] already defined");
+ }
+ });
+ }
+ String allocatorName = SHARDS_ALLOCATOR_TYPE_SETTING.get(settings);
+ Supplier<ShardsAllocator> allocatorSupplier = allocators.get(allocatorName);
+ if (allocatorSupplier == null) {
+ throw new IllegalArgumentException("Unknown ShardsAllocator [" + allocatorName + "]");
+ }
+ return Objects.requireNonNull(allocatorSupplier.get(),
+ "ShardsAllocator factory for [" + allocatorName + "] returned null");
}
@Override
protected void configure() {
- // bind ShardsAllocator
- String shardsAllocatorType = shardsAllocators.bindType(binder(), settings, ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), ClusterModule.BALANCED_ALLOCATOR);
- if (shardsAllocatorType.equals(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR)) {
- final ESLogger logger = Loggers.getLogger(getClass(), settings);
- logger.warn("{} allocator has been removed in 2.0 using {} instead", ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, ClusterModule.BALANCED_ALLOCATOR);
- }
- allocationDeciders.bind(binder());
- indexTemplateFilters.bind(binder());
-
bind(ClusterInfoService.class).to(clusterInfoServiceImpl).asEagerSingleton();
bind(GatewayAllocator.class).asEagerSingleton();
bind(AllocationService.class).asEagerSingleton();
@@ -161,5 +181,7 @@ public class ClusterModule extends AbstractModule {
bind(NodeMappingRefreshAction.class).asEagerSingleton();
bind(MappingUpdatedAction.class).asEagerSingleton();
bind(TaskResultsService.class).asEagerSingleton();
+ bind(AllocationDeciders.class).toInstance(new AllocationDeciders(settings, allocationDeciders));
+ bind(ShardsAllocator.class).toInstance(shardsAllocator);
}
}
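The two static factories above replace the Guice-era extension points: plugins now contribute deciders and allocators directly through ClusterPlugin, whose hook signatures can be read off the calls in this hunk. A minimal sketch of such a plugin (MyDecider, MyAllocator and the plugin class itself are hypothetical):

    import java.util.Collection;
    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Supplier;

    public class MyClusterPlugin extends Plugin implements ClusterPlugin {
        @Override
        public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
            // builtin deciders above take Settings (and sometimes ClusterSettings) the same way
            return Collections.singletonList(new MyDecider(settings));
        }

        @Override
        public Map<String, Supplier<ShardsAllocator>> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) {
            // the supplier is only invoked for the allocator selected via cluster.routing.allocation.type
            return Collections.singletonMap("my_allocator", () -> new MyAllocator(settings));
        }
    }

Selecting the custom allocator is then a matter of setting cluster.routing.allocation.type: my_allocator; an unknown name fails with "Unknown ShardsAllocator", and registering an allocator name or a decider class twice is rejected up front.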
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
index 632179153f..e592b5092b 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -22,7 +22,6 @@ package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -37,7 +36,6 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
@@ -292,7 +290,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
sb.append(TAB).append(TAB).append(shard).append(": ");
sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], ");
- sb.append("a_ids ").append(indexMetaData.activeAllocationIds(shard)).append("\n");
+ sb.append("isa_ids ").append(indexMetaData.inSyncAllocationIds(shard)).append("\n");
}
}
sb.append(blocks().prettyPrint());
@@ -501,8 +499,8 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endObject();
- builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
- for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
+ builder.startObject(IndexMetaData.KEY_IN_SYNC_ALLOCATIONS);
+ for (IntObjectCursor<Set<String>> cursor : indexMetaData.getInSyncAllocationIds()) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);
@@ -629,12 +627,6 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
return nodes;
}
- public Builder routingResult(RoutingAllocation.Result routingResult) {
- this.routingTable = routingResult.routingTable();
- this.metaData = routingResult.metaData();
- return this;
- }
-
public Builder routingTable(RoutingTable routingTable) {
this.routingTable = routingTable;
return this;
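With Builder#routingResult gone, callers no longer fold a RoutingAllocation.Result into a fresh state themselves; AllocationService methods now return the (possibly unchanged) ClusterState directly, as the ShardStateAction hunks below show. A before/after sketch (variable names illustrative):

    // before: check result.changed() and rebuild the state by hand
    RoutingAllocation.Result result = allocationService.applyStartedShards(currentState, startedShards, true);
    ClusterState newState = result.changed()
            ? ClusterState.builder(currentState).routingResult(result).build()
            : currentState;

    // after: the service hands back currentState itself when nothing changed
    ClusterState newState = allocationService.applyStartedShards(currentState, startedShards);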
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java
index 228ac3f41b..e18ec5543d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java
@@ -19,10 +19,10 @@
package org.elasticsearch.cluster;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicReference;
*/
public class ClusterStateObserver {
- protected final ESLogger logger;
+ protected final Logger logger;
public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() {
@@ -58,7 +58,7 @@ public class ClusterStateObserver {
volatile boolean timedOut;
- public ClusterStateObserver(ClusterService clusterService, ESLogger logger, ThreadContext contextHolder) {
+ public ClusterStateObserver(ClusterService clusterService, Logger logger, ThreadContext contextHolder) {
this(clusterService, new TimeValue(60000), logger, contextHolder);
}
@@ -67,7 +67,7 @@ public class ClusterStateObserver {
* will fail any existing or new #waitForNextChange calls. Set to null
* to wait indefinitely
*/
- public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, ESLogger logger, ThreadContext contextHolder) {
+ public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) {
this.clusterService = clusterService;
this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state()));
this.timeOutValue = timeout;
diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
index 534f007e8b..b32e992c5a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@@ -39,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -379,7 +379,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
return clusterInfo;
}
- static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
+ static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
MetaData meta = state.getMetaData();
for (ShardStats s : stats) {
@@ -402,7 +402,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
}
}
- static void fillDiskUsagePerNode(ESLogger logger, List<NodeStats> nodeStatsArray,
+ static void fillDiskUsagePerNode(Logger logger, List<NodeStats> nodeStatsArray,
ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages,
ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages) {
for (NodeStats nodeStats : nodeStatsArray) {
@@ -415,9 +415,9 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
if (leastAvailablePath == null) {
assert mostAvailablePath == null;
mostAvailablePath = leastAvailablePath = info;
- } else if (leastAvailablePath.getAvailable().bytes() > info.getAvailable().bytes()){
+ } else if (leastAvailablePath.getAvailable().getBytes() > info.getAvailable().getBytes()) {
leastAvailablePath = info;
- } else if (mostAvailablePath.getAvailable().bytes() < info.getAvailable().bytes()) {
+ } else if (mostAvailablePath.getAvailable().getBytes() < info.getAvailable().getBytes()) {
mostAvailablePath = info;
}
}
@@ -428,21 +428,21 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(),
leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
}
- if (leastAvailablePath.getTotal().bytes() < 0) {
+ if (leastAvailablePath.getTotal().getBytes() < 0) {
if (logger.isTraceEnabled()) {
logger.trace("node: [{}] least available path has less than 0 total bytes of disk [{}], skipping",
- nodeId, leastAvailablePath.getTotal().bytes());
+ nodeId, leastAvailablePath.getTotal().getBytes());
}
} else {
- newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
+ newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().getBytes(), leastAvailablePath.getAvailable().getBytes()));
}
- if (mostAvailablePath.getTotal().bytes() < 0) {
+ if (mostAvailablePath.getTotal().getBytes() < 0) {
if (logger.isTraceEnabled()) {
logger.trace("node: [{}] most available path has less than 0 total bytes of disk [{}], skipping",
- nodeId, mostAvailablePath.getTotal().bytes());
+ nodeId, mostAvailablePath.getTotal().getBytes());
}
} else {
- newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
+ newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().getBytes(), mostAvailablePath.getAvailable().getBytes()));
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
index a487bda0db..99f161b9da 100644
--- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.cluster;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -91,7 +93,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
try {
transportService.disconnectFromNode(node);
} catch (Exception e) {
- logger.warn("failed to disconnect to node [{}]", e, node);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
}
}
}
@@ -113,7 +115,11 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
nodeFailureCount = nodeFailureCount + 1;
// log every 6th failure
if ((nodeFailureCount % 6) == 1) {
- logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
+ final int finalNodeFailureCount = nodeFailureCount;
+ logger.warn(
+ (Supplier<?>)
+ () -> new ParameterizedMessage(
+ "failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
}
nodes.put(node, nodeFailureCount);
}
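The pattern above recurs throughout this commit's Log4j 2 migration: the removed ESLogger API accepted the exception inline among the format arguments, whereas Logger#warn takes a lazily evaluated message Supplier plus the Throwable as the last argument, so any mutable local captured by the lambda first has to be copied into an effectively final variable. Condensed sketch of the idiom:

    // old ESLogger style (removed): logger.warn("failed to connect to node {} (tried [{}] times)", e, node, count);
    final int attempts = nodeFailureCount; // effectively final copy for the lambda
    logger.warn(
        (Supplier<?>) () -> new ParameterizedMessage(
            "failed to connect to node {} (tried [{}] times)", node, attempts), e);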
diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index 99b19e31a4..ce6473ecb4 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -19,6 +19,9 @@
package org.elasticsearch.cluster.action.shard;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -34,8 +37,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
+import org.elasticsearch.cluster.routing.allocation.StaleShard;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
@@ -43,7 +46,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@@ -108,7 +110,7 @@ public class ShardStateAction extends AbstractComponent {
if (isMasterChannelException(exp)) {
waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
} else {
- logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
}
}
@@ -127,29 +129,32 @@ public class ShardStateAction extends AbstractComponent {
}
/**
- * Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node.
+ * Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node. This means
+ * that the shard should be failed because a write made it into the primary but was not replicated to this shard copy. If the shard
+ * does not exist anymore but still has an entry in the in-sync set, remove its allocation id from the in-sync set.
*
- * @param shardRouting the shard to fail
- * @param primaryTerm the primary term associated with the primary shard that is failing the shard.
+ * @param shardId shard id of the shard to fail
+ * @param allocationId allocation id of the shard to fail
+ * @param primaryTerm the primary term associated with the primary shard that is failing the shard. Must be strictly positive.
* @param message the reason for the failure
* @param failure the underlying cause of the failure
* @param listener callback upon completion of the request
*/
- public void remoteShardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
+ public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
assert primaryTerm > 0L : "primary term should be strictly positive";
- shardFailed(shardRouting, primaryTerm, message, failure, listener);
+ shardFailed(shardId, allocationId, primaryTerm, message, failure, listener);
}
/**
* Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
*/
public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) {
- shardFailed(shardRouting, 0L, message, failure, listener);
+ shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener);
}
- private void shardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
+ private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
- ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), primaryTerm, message, failure);
+ ShardEntry shardEntry = new ShardEntry(shardId, allocationId, primaryTerm, message, failure);
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener);
}
@@ -166,7 +171,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onClusterServiceClose() {
- logger.warn("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@@ -181,9 +186,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
- private final ESLogger logger;
+ private final Logger logger;
- public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, ESLogger logger) {
+ public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor;
this.logger = logger;
@@ -191,7 +196,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
- logger.warn("{} received shard failed for {}", request.failure, request.shardId, request);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
clusterService.submitStateUpdateTask(
"shard-failed",
request,
@@ -200,12 +205,12 @@ public class ShardStateAction extends AbstractComponent {
new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
- logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
try {
channel.sendResponse(e);
} catch (Exception channelException) {
channelException.addSuppressed(e);
- logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardId, e, request);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
}
}
@@ -215,7 +220,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(new NotMasterException(source));
} catch (Exception channelException) {
- logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
}
}
@@ -224,7 +229,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(TransportResponse.Empty.INSTANCE);
} catch (Exception channelException) {
- logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardId, request);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
}
}
}
@@ -235,9 +240,9 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry> {
private final AllocationService allocationService;
private final RoutingService routingService;
- private final ESLogger logger;
+ private final Logger logger;
- public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
+ public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, Logger logger) {
this.allocationService = allocationService;
this.routingService = routingService;
this.logger = logger;
@@ -247,17 +252,24 @@ public class ShardStateAction extends AbstractComponent {
public BatchResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
BatchResult.Builder<ShardEntry> batchResultBuilder = BatchResult.builder();
List<ShardEntry> tasksToBeApplied = new ArrayList<>();
- List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>();
- Set<ShardRouting> seenShardRoutings = new HashSet<>(); // to prevent duplicates
+ List<FailedShard> failedShardsToBeApplied = new ArrayList<>();
+ List<StaleShard> staleShardsToBeApplied = new ArrayList<>();
for (ShardEntry task : tasks) {
IndexMetaData indexMetaData = currentState.metaData().index(task.shardId.getIndex());
if (indexMetaData == null) {
- // tasks that correspond to non-existent shards are marked as successful
+ // tasks that correspond to non-existent indices are marked as successful
logger.debug("{} ignoring shard failed task [{}] (unknown index {})", task.shardId, task, task.shardId.getIndex());
batchResultBuilder.success(task);
} else {
- // non-local requests
+ // The primary term is 0 if the shard failed itself. It is > 0 if a write was done on a primary but failed to be
+ // replicated to the shard copy with the provided allocation id. In the case where the shard failed itself, it's ok to just
+ // remove the corresponding routing entry from the routing table. In the case where a write could not be replicated,
+ // however, it is important to ensure that the shard copy with the missing write is considered stale from that point
+ // on, which is implemented by removing the allocation id of the shard copy from the in-sync allocations set.
+ // We check here that the primary to which the write happened was not already failed in an earlier cluster state update.
+ // This prevents situations where a new primary has already been selected and replication failures from an old stale
+ // primary unnecessarily fail currently active shards.
if (task.primaryTerm > 0) {
long currentPrimaryTerm = indexMetaData.primaryTerm(task.shardId.id());
if (currentPrimaryTerm != task.primaryTerm) {
@@ -274,34 +286,35 @@ public class ShardStateAction extends AbstractComponent {
ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId);
if (matched == null) {
- // tasks that correspond to non-existent shards are marked as successful
- logger.debug("{} ignoring shard failed task [{}] (shard does not exist anymore)", task.shardId, task);
- batchResultBuilder.success(task);
- } else {
- // remove duplicate actions as allocation service expects a clean list without duplicates
- if (seenShardRoutings.contains(matched)) {
- logger.trace("{} ignoring shard failed task [{}] (already scheduled to fail {})", task.shardId, task, matched);
+ Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(task.shardId.id());
+ // mark shard copies without routing entries that are in in-sync allocations set only as stale if the reason why
+ // they were failed is because a write made it into the primary but not to this copy (which corresponds to
+ // the check "primaryTerm > 0").
+ if (task.primaryTerm > 0 && inSyncAllocationIds.contains(task.allocationId)) {
+ logger.debug("{} marking shard {} as stale (shard failed task: [{}])", task.shardId, task.allocationId, task);
tasksToBeApplied.add(task);
+ staleShardsToBeApplied.add(new StaleShard(task.shardId, task.allocationId));
} else {
- logger.debug("{} failing shard {} (shard failed task: [{}])", task.shardId, matched, task);
- tasksToBeApplied.add(task);
- shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(matched, task.message, task.failure));
- seenShardRoutings.add(matched);
+ // tasks that correspond to non-existent shards are marked as successful
+ logger.debug("{} ignoring shard failed task [{}] (shard does not exist anymore)", task.shardId, task);
+ batchResultBuilder.success(task);
}
+ } else {
+ // failing a shard also possibly marks it as stale (see IndexMetaDataUpdater)
+ logger.debug("{} failing shard {} (shard failed task: [{}])", task.shardId, matched, task);
+ tasksToBeApplied.add(task);
+ failedShardsToBeApplied.add(new FailedShard(matched, task.message, task.failure));
}
}
}
- assert tasksToBeApplied.size() >= shardRoutingsToBeApplied.size();
+ assert tasksToBeApplied.size() == failedShardsToBeApplied.size() + staleShardsToBeApplied.size();
ClusterState maybeUpdatedState = currentState;
try {
- RoutingAllocation.Result result = applyFailedShards(currentState, shardRoutingsToBeApplied);
- if (result.changed()) {
- maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
- }
+ maybeUpdatedState = applyFailedShards(currentState, failedShardsToBeApplied, staleShardsToBeApplied);
batchResultBuilder.successes(tasksToBeApplied);
} catch (Exception e) {
- logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e);
// failures are communicated back to the requester
// cluster state will not be updated in this case
batchResultBuilder.failures(tasksToBeApplied, e);
@@ -311,8 +324,8 @@ public class ShardStateAction extends AbstractComponent {
}
// visible for testing
- RoutingAllocation.Result applyFailedShards(ClusterState currentState, List<FailedRerouteAllocation.FailedShard> failedShards) {
- return allocationService.applyFailedShards(currentState, failedShards);
+ ClusterState applyFailedShards(ClusterState currentState, List<FailedShard> failedShards, List<StaleShard> staleShards) {
+ return allocationService.applyFailedShards(currentState, failedShards, staleShards);
}
@Override
@@ -337,9 +350,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
- private final ESLogger logger;
+ private final Logger logger;
- public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, ESLogger logger) {
+ public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor;
this.logger = logger;
@@ -360,9 +373,9 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry>, ClusterStateTaskListener {
private final AllocationService allocationService;
- private final ESLogger logger;
+ private final Logger logger;
- public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, ESLogger logger) {
+ public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, Logger logger) {
this.allocationService = allocationService;
this.logger = logger;
}
@@ -409,14 +422,10 @@ public class ShardStateAction extends AbstractComponent {
ClusterState maybeUpdatedState = currentState;
try {
- RoutingAllocation.Result result =
- allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
- if (result.changed()) {
- maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
- }
+ maybeUpdatedState = allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied);
builder.successes(tasksToBeApplied);
} catch (Exception e) {
- logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
builder.failures(tasksToBeApplied, e);
}
@@ -425,7 +434,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
}
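Condensed, the per-task classification that the shard-failed executor now performs for an existing index and matching primary term is (names as in the hunks above, surrounding bookkeeping elided):

    ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId);
    if (matched != null) {
        // routing entry still exists: fail the shard (this may also mark it stale, see IndexMetaDataUpdater)
        failedShardsToBeApplied.add(new FailedShard(matched, task.message, task.failure));
    } else if (task.primaryTerm > 0
            && indexMetaData.inSyncAllocationIds(task.shardId.id()).contains(task.allocationId)) {
        // no routing entry, but the copy missed a replicated write: only mark it stale
        staleShardsToBeApplied.add(new StaleShard(task.shardId, task.allocationId));
    } else {
        // shard is gone and not in-sync: nothing to do
        batchResultBuilder.success(task);
    }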
diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java
index 447e1582f2..9bc483ace6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java
+++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java
@@ -54,7 +54,7 @@ public final class ClusterIndexHealth implements Iterable<ClusterShardHealth>, W
for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
int shardId = shardRoutingTable.shardId().id();
- shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable, indexMetaData));
+ shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable));
}
// update the index status
diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java
index 62fe835753..12131b11f3 100644
--- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java
+++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java
@@ -19,12 +19,11 @@
package org.elasticsearch.cluster.health;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
-import org.elasticsearch.cluster.routing.UnassignedInfo.Reason;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -41,7 +40,7 @@ public final class ClusterShardHealth implements Writeable {
private final int unassignedShards;
private final boolean primaryActive;
- public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable, final IndexMetaData indexMetaData) {
+ public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable) {
this.shardId = shardId;
int computeActiveShards = 0;
int computeRelocatingShards = 0;
@@ -69,7 +68,7 @@ public final class ClusterShardHealth implements Writeable {
computeStatus = ClusterHealthStatus.YELLOW;
}
} else {
- computeStatus = getInactivePrimaryHealth(primaryRouting, indexMetaData);
+ computeStatus = getInactivePrimaryHealth(primaryRouting);
}
this.status = computeStatus;
this.activeShards = computeActiveShards;
@@ -131,28 +130,25 @@ public final class ClusterShardHealth implements Writeable {
/**
* Checks if an inactive primary shard should cause the cluster health to go RED.
*
- * Normally, an inactive primary shard in an index should cause the cluster health to be RED. However,
- * there are exceptions where a health status of RED is inappropriate, namely in these scenarios:
- * 1. Index Creation. When an index is first created, the primary shards are in the initializing state, so
- * there is a small window where the cluster health is RED due to the primaries not being activated yet.
- * However, this leads to a false sense that the cluster is in an unhealthy state, when in reality, its
- * simply a case of needing to wait for the primaries to initialize.
- * 2. When a cluster is in the recovery state, and the shard never had any allocation ids assigned to it,
- * which indicates the index was created and before allocation of the primary occurred for this shard,
- * a cluster restart happened.
- *
- * Here, we check for these scenarios and set the cluster health to YELLOW if any are applicable.
+ * An inactive primary shard in an index should cause the cluster health to be RED to make it visible that some of the existing data is
+ * unavailable. In the case of index creation, snapshot restore or index shrinking, which are ordinary events in the cluster lifecycle,
+ * cluster health should not turn RED while primaries are still initializing but should be YELLOW instead.
+ * However, in the case of exceptional events, for example when the primary shard cannot be assigned to a node or initialization fails at
+ * some point, cluster health should still turn RED.
*
* NB: this method should *not* be called on active shards nor on non-primary shards.
*/
- public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting, final IndexMetaData indexMetaData) {
+ public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting) {
assert shardRouting.primary() : "cannot invoke on a replica shard: " + shardRouting;
assert shardRouting.active() == false : "cannot invoke on an active shard: " + shardRouting;
assert shardRouting.unassignedInfo() != null : "cannot invoke on a shard with no UnassignedInfo: " + shardRouting;
+ assert shardRouting.recoverySource() != null : "cannot invoke on a shard that has no recovery source: " + shardRouting;
final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
- if (unassignedInfo.getLastAllocationStatus() != AllocationStatus.DECIDERS_NO
- && shardRouting.allocatedPostIndexCreate(indexMetaData) == false
- && (unassignedInfo.getReason() == Reason.INDEX_CREATED || unassignedInfo.getReason() == Reason.CLUSTER_RECOVERED)) {
+ RecoverySource.Type recoveryType = shardRouting.recoverySource().getType();
+ if (unassignedInfo.getLastAllocationStatus() != AllocationStatus.DECIDERS_NO && unassignedInfo.getNumFailedAllocations() == 0
+ && (recoveryType == RecoverySource.Type.EMPTY_STORE
+ || recoveryType == RecoverySource.Type.LOCAL_SHARDS
+ || recoveryType == RecoverySource.Type.SNAPSHOT)) {
return ClusterHealthStatus.YELLOW;
} else {
return ClusterHealthStatus.RED;
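In effect, the recovery source now stands in for the removed reason and index-metadata checks. A usage sketch of the resulting classification (unassignedPrimary is an illustrative inactive primary ShardRouting):

    ClusterHealthStatus status = ClusterShardHealth.getInactivePrimaryHealth(unassignedPrimary);
    // YELLOW: recovering from EMPTY_STORE (index creation), LOCAL_SHARDS (shrink) or SNAPSHOT (restore),
    //         with no failed allocation attempts and no DECIDERS_NO answer yet
    // RED:    everything else, e.g. the deciders vetoed allocation, an allocation attempt already failed,
    //         or the primary must recover from an existing on-disk copy that is currently unavailable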
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
index a039ce8041..ff49d07281 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
@@ -19,213 +19,167 @@
package org.elasticsearch.cluster.metadata;
-import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.query.QueryBuilder;
-
-import java.io.IOException;
-import java.util.Map;
+import org.elasticsearch.common.Strings;
/**
- *
+ * Individual operation to perform on the cluster state as part of an {@link IndicesAliasesRequest}.
*/
-public class AliasAction implements Streamable {
-
- public static enum Type {
- ADD((byte) 0),
- REMOVE((byte) 1);
-
- private final byte value;
-
- Type(byte value) {
- this.value = value;
- }
-
- public byte value() {
- return value;
- }
+public abstract class AliasAction {
+ private final String index;
- public static Type fromValue(byte value) {
- if (value == 0) {
- return ADD;
- } else if (value == 1) {
- return REMOVE;
- } else {
- throw new IllegalArgumentException("No type for action [" + value + "]");
- }
+ private AliasAction(String index) {
+ if (false == Strings.hasText(index)) {
+ throw new IllegalArgumentException("[index] is required");
}
- }
-
- private Type actionType;
-
- private String index;
-
- private String alias;
-
- @Nullable
- private String filter;
-
- @Nullable
- private String indexRouting;
-
- @Nullable
- private String searchRouting;
-
- private AliasAction() {
-
- }
-
- public AliasAction(AliasAction other) {
- this.actionType = other.actionType;
- this.index = other.index;
- this.alias = other.alias;
- this.filter = other.filter;
- this.indexRouting = other.indexRouting;
- this.searchRouting = other.searchRouting;
- }
-
- public AliasAction(Type actionType) {
- this.actionType = actionType;
- }
-
- public AliasAction(Type actionType, String index, String alias) {
- this.actionType = actionType;
- this.index = index;
- this.alias = alias;
- }
-
- public AliasAction(Type actionType, String index, String alias, String filter) {
- this.actionType = actionType;
this.index = index;
- this.alias = alias;
- this.filter = filter;
}
- public Type actionType() {
- return actionType;
- }
-
- public AliasAction index(String index) {
- this.index = index;
- return this;
- }
-
- public String index() {
+ /**
+ * Get the index on which the operation should act.
+ */
+ public String getIndex() {
return index;
}
-
- public AliasAction alias(String alias) {
- this.alias = alias;
- return this;
- }
-
- public String alias() {
- return alias;
- }
- public String filter() {
- return filter;
- }
-
- public AliasAction filter(String filter) {
- this.filter = filter;
- return this;
- }
-
- public AliasAction filter(Map<String, Object> filter) {
- if (filter == null || filter.isEmpty()) {
- this.filter = null;
- return this;
- }
- try {
- XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
- builder.map(filter);
- this.filter = builder.string();
- return this;
- } catch (IOException e) {
- throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
+ /**
+ * Should this action remove the index? Actions that return true from this will never execute
+ * {@link #apply(NewAliasValidator, MetaData.Builder, IndexMetaData)}.
+ */
+ abstract boolean removeIndex();
+
+ /**
+ * Apply the action.
+ *
+ * @param aliasValidator call to validate a new alias before adding it to the builder
+ * @param metadata metadata builder for the changes made by all actions as part of this request
+ * @param index metadata for the index being changed
+ * @return did this action make any changes?
+ */
+ abstract boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index);
+
+ /**
+ * Validate a new alias.
+ */
+ @FunctionalInterface
+ public interface NewAliasValidator {
+ void validate(String alias, @Nullable String indexRouting, @Nullable String filter);
+ }
+
+ /**
+ * Operation to add an alias to an index.
+ */
+ public static class Add extends AliasAction {
+ private final String alias;
+
+ @Nullable
+ private final String filter;
+
+ @Nullable
+ private final String indexRouting;
+
+ @Nullable
+ private final String searchRouting;
+
+ /**
+ * Build the operation.
+ */
+ public Add(String index, String alias, @Nullable String filter, @Nullable String indexRouting, @Nullable String searchRouting) {
+ super(index);
+ if (false == Strings.hasText(alias)) {
+ throw new IllegalArgumentException("[alias] is required");
+ }
+ this.alias = alias;
+ this.filter = filter;
+ this.indexRouting = indexRouting;
+ this.searchRouting = searchRouting;
}
- }
- public AliasAction filter(QueryBuilder queryBuilder) {
- if (queryBuilder == null) {
- this.filter = null;
- return this;
- }
- try {
- XContentBuilder builder = XContentFactory.jsonBuilder();
- queryBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
- builder.close();
- this.filter = builder.string();
- return this;
- } catch (IOException e) {
- throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
+ /**
+ * Alias to add to the index.
+ */
+ public String getAlias() {
+ return alias;
}
- }
- public AliasAction routing(String routing) {
- this.indexRouting = routing;
- this.searchRouting = routing;
- return this;
- }
+ @Override
+ boolean removeIndex() {
+ return false;
+ }
- public String indexRouting() {
- return indexRouting;
+ @Override
+ boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) {
+ aliasValidator.validate(alias, indexRouting, filter);
+ AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(alias).filter(filter).indexRouting(indexRouting)
+ .searchRouting(searchRouting).build();
+ // Check if this alias already exists
+ AliasMetaData currentAliasMd = index.getAliases().get(alias);
+ if (currentAliasMd != null && currentAliasMd.equals(newAliasMd)) {
+ // It already exists, ignore it
+ return false;
+ }
+ metadata.put(IndexMetaData.builder(index).putAlias(newAliasMd));
+ return true;
+ }
}
- public AliasAction indexRouting(String indexRouting) {
- this.indexRouting = indexRouting;
- return this;
- }
+ /**
+ * Operation to remove an alias from an index.
+ */
+ public static class Remove extends AliasAction {
+ private final String alias;
- public String searchRouting() {
- return searchRouting;
- }
+ /**
+ * Build the operation.
+ */
+ public Remove(String index, String alias) {
+ super(index);
+ if (false == Strings.hasText(alias)) {
+ throw new IllegalArgumentException("[alias] is required");
+ }
+ this.alias = alias;
+ }
- public AliasAction searchRouting(String searchRouting) {
- this.searchRouting = searchRouting;
- return this;
- }
+ /**
+ * Alias to remove from the index.
+ */
+ public String getAlias() {
+ return alias;
+ }
- public static AliasAction readAliasAction(StreamInput in) throws IOException {
- AliasAction aliasAction = new AliasAction();
- aliasAction.readFrom(in);
- return aliasAction;
- }
+ @Override
+ boolean removeIndex() {
+ return false;
+ }
- @Override
- public void readFrom(StreamInput in) throws IOException {
- actionType = Type.fromValue(in.readByte());
- index = in.readOptionalString();
- alias = in.readOptionalString();
- filter = in.readOptionalString();
- indexRouting = in.readOptionalString();
- searchRouting = in.readOptionalString();
+ @Override
+ boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) {
+ if (false == index.getAliases().containsKey(alias)) {
+ return false;
+ }
+ metadata.put(IndexMetaData.builder(index).removeAlias(alias));
+ return true;
+ }
}
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeByte(actionType.value());
- out.writeOptionalString(index);
- out.writeOptionalString(alias);
- out.writeOptionalString(filter);
- out.writeOptionalString(indexRouting);
- out.writeOptionalString(searchRouting);
- }
+ /**
+ * Operation to remove an index. This is an "alias action" because it allows us to remove an index at the same time as we add an
+ * alias to replace it.
+ */
+ public static class RemoveIndex extends AliasAction {
+ public RemoveIndex(String index) {
+ super(index);
+ }
- public static AliasAction newAddAliasAction(String index, String alias) {
- return new AliasAction(Type.ADD, index, alias);
- }
+ @Override
+ boolean removeIndex() {
+ return true;
+ }
- public static AliasAction newRemoveAliasAction(String index, String alias) {
- return new AliasAction(Type.REMOVE, index, alias);
+ @Override
+ boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) {
+ throw new UnsupportedOperationException();
+ }
}
-
-}
+}
\ No newline at end of file
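The hierarchy above replaces the old mutable, Streamable AliasAction with immutable, validated operations. Constructing the three variants now looks like this (index and alias names are illustrative):

    AliasAction add = new AliasAction.Add("logs-2016-10", "logs",
            /* filter */ null, /* indexRouting */ null, /* searchRouting */ null);
    AliasAction remove = new AliasAction.Remove("logs-2016-09", "logs");
    AliasAction swap = new AliasAction.RemoveIndex("logs-2016-09"); // removes the index itself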
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
index d98187cc6c..cb46b22fe7 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
@@ -33,6 +34,7 @@ import org.elasticsearch.indices.InvalidAliasNameException;
import java.io.IOException;
import java.util.Optional;
+import java.util.function.Function;
/**
* Validator for an alias, to be used before adding an alias to the index metadata
@@ -46,21 +48,12 @@ public class AliasValidator extends AbstractComponent {
}
/**
- * Allows to validate an {@link org.elasticsearch.cluster.metadata.AliasAction} and make sure
- * it's valid before it gets added to the index metadata. Doesn't validate the alias filter.
- * @throws IllegalArgumentException if the alias is not valid
- */
- public void validateAliasAction(AliasAction aliasAction, MetaData metaData) {
- validateAlias(aliasAction.alias(), aliasAction.index(), aliasAction.indexRouting(), metaData);
- }
-
- /**
* Allows to validate an {@link org.elasticsearch.action.admin.indices.alias.Alias} and make sure
* it's valid before it gets added to the index metadata. Doesn't validate the alias filter.
* @throws IllegalArgumentException if the alias is not valid
*/
public void validateAlias(Alias alias, String index, MetaData metaData) {
- validateAlias(alias.name(), index, alias.indexRouting(), metaData);
+ validateAlias(alias.name(), index, alias.indexRouting(), name -> metaData.index(name));
}
/**
@@ -69,7 +62,7 @@ public class AliasValidator extends AbstractComponent {
* @throws IllegalArgumentException if the alias is not valid
*/
public void validateAliasMetaData(AliasMetaData aliasMetaData, String index, MetaData metaData) {
- validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), metaData);
+ validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), name -> metaData.index(name));
}
/**
@@ -90,16 +83,19 @@ public class AliasValidator extends AbstractComponent {
}
}
- private void validateAlias(String alias, String index, String indexRouting, MetaData metaData) {
+ /**
+ * Validate a proposed alias.
+ */
+ public void validateAlias(String alias, String index, @Nullable String indexRouting, Function<String, IndexMetaData> indexLookup) {
validateAliasStandalone(alias, indexRouting);
if (!Strings.hasText(index)) {
throw new IllegalArgumentException("index name is required");
}
- assert metaData != null;
- if (metaData.hasIndex(alias)) {
- throw new InvalidAliasNameException(metaData.index(alias).getIndex(), alias, "an index exists with the same name as the alias");
+ IndexMetaData indexNamedSameAsAlias = indexLookup.apply(alias);
+ if (indexNamedSameAsAlias != null) {
+ throw new InvalidAliasNameException(indexNamedSameAsAlias.getIndex(), alias, "an index exists with the same name as the alias");
}
}
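Because the index lookup is now just a Function<String, IndexMetaData>, callers can validate against whatever view of the indices they have at hand, for example live cluster metadata or a MetaData.Builder mid-update. A one-line sketch (alias and index names illustrative, indexRouting passed as null since it is @Nullable):

    aliasValidator.validateAlias("my-alias", "my-index", null, name -> metaData.index(name));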
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
index 94dd3c63da..f6a54fc82d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
@@ -37,11 +37,11 @@ import org.elasticsearch.index.Index;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Objects;
-import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
@@ -219,7 +219,7 @@ public final class IndexGraveyard implements MetaData.Custom {
/**
* Add a set of deleted indexes to the list of tombstones in the cluster state.
*/
- public Builder addTombstones(final Index[] indices) {
+ public Builder addTombstones(final Collection<Index> indices) {
for (Index index : indices) {
addTombstone(index);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index 4f51fa9818..44db87d9b3 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -157,10 +157,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}
+ static Setting<Integer> buildNumberOfShardsSetting() {
+ /* This is a safety limit that should only be exceeded in very rare and special cases. The assumption is that
+ * 99% of users have fewer than 1024 shards per index. We also make it a hard check that requires a restart of nodes
+ * if a cluster should allow creating more than 1024 shards per index. NOTE: this does not limit the number of shards per cluster.
+ * It also prevents accidentally creating something like a new index with millions of shards, which would essentially kill
+ * the entire cluster with OOM on the spot. */
+ final int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", "1024"));
+ if (maxNumShards < 1) {
+ throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0");
+ }
+ return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards,
+ Property.IndexScope);
+ }
+
public static final String INDEX_SETTING_PREFIX = "index.";
public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
- public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING =
- Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, Property.IndexScope);
+ public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING = buildNumberOfShardsSetting();
public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING =
Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope);
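Since the cap is read from a system property when the class is loaded, raising it is a node-level change that requires a restart and must be applied to every node, for example via a JVM flag (the value shown is illustrative; values below 1 are rejected at startup):

    -Des.index.max_number_of_shards=2048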
@@ -196,6 +209,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string";
public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
public static final String SETTING_CREATION_DATE = "index.creation_date";
+ /**
+ * The user provided name for an index. This is the plain string provided by the user when the index was created.
+ * It might still contain date math expressions etc. (added in 5.0)
+ */
+ public static final String SETTING_INDEX_PROVIDED_NAME = "index.provided_name";
public static final String SETTING_PRIORITY = "index.priority";
public static final Setting<Integer> INDEX_PRIORITY_SETTING =
Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope);
@@ -232,7 +250,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();
- public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
+ public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations";
static final String KEY_VERSION = "version";
static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards";
static final String KEY_SETTINGS = "settings";
@@ -262,7 +280,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final ImmutableOpenMap<String, Custom> customs;
- private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
+ private final ImmutableOpenIntMap<Set<String>> inSyncAllocationIds;
private final transient int totalNumberOfShards;
@@ -279,7 +297,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
- ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
+ ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion,
int routingNumShards, ActiveShardCount waitForActiveShards) {
@@ -296,7 +314,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.mappings = mappings;
this.customs = customs;
this.aliases = aliases;
- this.activeAllocationIds = activeAllocationIds;
+ this.inSyncAllocationIds = inSyncAllocationIds;
this.requireFilters = requireFilters;
this.includeFilters = includeFilters;
this.excludeFilters = excludeFilters;
@@ -340,7 +358,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
* a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary.
*
* Note: since we increment the term every time a shard is assigned, the term for any operational shard (i.e., a shard
- * that can be indexed into) is larger than 0. See {@link IndexMetaDataUpdater#applyChanges(MetaData)}.
+ * that can be indexed into) is larger than 0. See {@link IndexMetaDataUpdater#applyChanges}.
**/
public long primaryTerm(int shardId) {
return this.primaryTerms[shardId];
@@ -447,13 +465,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return (T) customs.get(type);
}
- public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
- return activeAllocationIds;
+ public ImmutableOpenIntMap<Set<String>> getInSyncAllocationIds() {
+ return inSyncAllocationIds;
}
- public Set<String> activeAllocationIds(int shardId) {
+ public Set<String> inSyncAllocationIds(int shardId) {
assert shardId >= 0 && shardId < numberOfShards;
- return activeAllocationIds.get(shardId);
+ return inSyncAllocationIds.get(shardId);
}
@Nullable
@@ -518,7 +536,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
if (Arrays.equals(primaryTerms, that.primaryTerms) == false) {
return false;
}
- if (!activeAllocationIds.equals(that.activeAllocationIds)) {
+ if (!inSyncAllocationIds.equals(that.inSyncAllocationIds)) {
return false;
}
return true;
@@ -536,7 +554,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
result = 31 * result + Long.hashCode(routingFactor);
result = 31 * result + Long.hashCode(routingNumShards);
result = 31 * result + Arrays.hashCode(primaryTerms);
- result = 31 * result + activeAllocationIds.hashCode();
+ result = 31 * result + inSyncAllocationIds.hashCode();
return result;
}
@@ -573,7 +591,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
private final Diff<ImmutableOpenMap<String, Custom>> customs;
- private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;
+ private final Diff<ImmutableOpenIntMap<Set<String>>> inSyncAllocationIds;
public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
index = after.index.getName();
@@ -585,7 +603,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
- activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
+ inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds,
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
}
@@ -610,7 +628,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
- activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
+ inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
DiffableUtils.StringSetValueSerializer.getInstance());
}
@@ -625,7 +643,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
mappings.writeTo(out);
aliases.writeTo(out);
customs.writeTo(out);
- activeAllocationIds.writeTo(out);
+ inSyncAllocationIds.writeTo(out);
}
@Override
@@ -639,7 +657,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.mappings.putAll(mappings.apply(part.mappings));
builder.aliases.putAll(aliases.apply(part.aliases));
builder.customs.putAll(customs.apply(part.customs));
- builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
+ builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds));
return builder.build();
}
}
@@ -668,11 +686,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
- int activeAllocationIdsSize = in.readVInt();
- for (int i = 0; i < activeAllocationIdsSize; i++) {
+ int inSyncAllocationIdsSize = in.readVInt();
+ for (int i = 0; i < inSyncAllocationIdsSize; i++) {
int key = in.readVInt();
Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
- builder.putActiveAllocationIds(key, allocationIds);
+ builder.putInSyncAllocationIds(key, allocationIds);
}
return builder.build();
}
@@ -698,8 +716,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
out.writeString(cursor.key);
cursor.value.writeTo(out);
}
- out.writeVInt(activeAllocationIds.size());
- for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
+ out.writeVInt(inSyncAllocationIds.size());
+ for (IntObjectCursor<Set<String>> cursor : inSyncAllocationIds) {
out.writeVInt(cursor.key);
DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
}
@@ -723,7 +741,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
private final ImmutableOpenMap.Builder<String, Custom> customs;
- private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;
+ private final ImmutableOpenIntMap.Builder<Set<String>> inSyncAllocationIds;
private Integer routingNumShards;
public Builder(String index) {
@@ -731,7 +749,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.mappings = ImmutableOpenMap.builder();
this.aliases = ImmutableOpenMap.builder();
this.customs = ImmutableOpenMap.builder();
- this.activeAllocationIds = ImmutableOpenIntMap.builder();
+ this.inSyncAllocationIds = ImmutableOpenIntMap.builder();
}
public Builder(IndexMetaData indexMetaData) {
@@ -744,7 +762,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
this.routingNumShards = indexMetaData.routingNumShards;
- this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
+ this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds);
}
public String index() {
@@ -854,8 +872,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}
- public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
- activeAllocationIds.put(shardId, new HashSet(allocationIds));
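+ /** Returns the in-sync allocation ids recorded on this builder for the given shard, or null if none are set. */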
+ public Set<String> getInSyncAllocationIds(int shardId) {
+ return inSyncAllocationIds.get(shardId);
+ }
+
+ public Builder putInSyncAllocationIds(int shardId, Set<String> allocationIds) {
+ inSyncAllocationIds.put(shardId, new HashSet<>(allocationIds));
return this;
}
@@ -934,13 +956,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
}
- // fill missing slots in activeAllocationIds with empty set if needed and make all entries immutable
- ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds = ImmutableOpenIntMap.builder();
+ // fill missing slots in inSyncAllocationIds with empty set if needed and make all entries immutable
+ ImmutableOpenIntMap.Builder<Set<String>> filledInSyncAllocationIds = ImmutableOpenIntMap.builder();
for (int i = 0; i < numberOfShards; i++) {
- if (activeAllocationIds.containsKey(i)) {
- filledActiveAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
+ if (inSyncAllocationIds.containsKey(i)) {
+ filledInSyncAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(inSyncAllocationIds.get(i))));
} else {
- filledActiveAllocationIds.put(i, Collections.emptySet());
+ filledInSyncAllocationIds.put(i, Collections.emptySet());
}
}
final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
@@ -1001,7 +1023,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
- tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
+ tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion, getRoutingNumShards(), waitForActiveShards);
}
@@ -1052,8 +1074,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
builder.endArray();
- builder.startObject(KEY_ACTIVE_ALLOCATIONS);
- for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
+ builder.startObject(KEY_IN_SYNC_ALLOCATIONS);
+ for (IntObjectCursor<Set<String>> cursor : indexMetaData.inSyncAllocationIds) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);
@@ -1104,7 +1126,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
}
- } else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
+ } else if (KEY_IN_SYNC_ALLOCATIONS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
@@ -1116,7 +1138,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
allocationIds.add(parser.text());
}
}
- builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
+ builder.putInSyncAllocationIds(Integer.valueOf(shardId), allocationIds);
} else {
throw new IllegalArgumentException("Unexpected token: " + token);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
index df53395fe2..6ecf7483d8 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
@@ -607,23 +607,21 @@ public class IndexNameExpressionResolver extends AbstractComponent {
add = false;
expression = expression.substring(1);
}
+ if (result == null) {
+ // add all the previous ones...
+ result = new HashSet<>(expressions.subList(0, i));
+ }
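+ // from this point on result is guaranteed to be non-null, so the add/remove below no longer needs a null check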
if (!Regex.isSimpleMatchPattern(expression)) {
if (!unavailableIgnoredOrExists(options, metaData, expression)) {
throw infe(expression);
}
- if (result != null) {
- if (add) {
- result.add(expression);
- } else {
- result.remove(expression);
- }
+ if (add) {
+ result.add(expression);
+ } else {
+ result.remove(expression);
}
continue;
}
- if (result == null) {
- // add all the previous ones...
- result = new HashSet<>(expressions.subList(0, i));
- }
final IndexMetaData.State excludeState = excludeState(options);
final Map<String, AliasOrIndex> matches = matches(metaData, expression);
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java
index 0ec3c5f863..6ed9664c92 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java
@@ -31,23 +31,4 @@ public interface IndexTemplateFilter {
* {@code false} otherwise.
*/
boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template);
-
- class Compound implements IndexTemplateFilter {
-
- private IndexTemplateFilter[] filters;
-
- Compound(IndexTemplateFilter... filters) {
- this.filters = filters;
- }
-
- @Override
- public boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template) {
- for (IndexTemplateFilter filter : filters) {
- if (!filter.apply(request, template)) {
- return false;
- }
- }
- return true;
- }
- }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
index 13d7e152ba..c11389d2dc 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
@@ -21,7 +21,9 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedXContent;
@@ -37,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
/**
@@ -50,6 +53,26 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
private final int order;
+ /**
+ * The version is an arbitrary number managed by the user so that they can easily and quickly verify the existence of a given template.
+ * Expected usage:
+ * <pre><code>
+ * PUT /_template/my_template
+ * {
+ * "template": "my_index-*",
+ * "mappings": { ... },
+ * "version": 1
+ * }
+ * </code></pre>
+ * A user-driven process can then periodically verify that the template exists with the expected version,
+ * without having to inspect the template's content:
+ * <pre><code>
+ * GET /_template/my_template?filter_path=*.version
+ * </code></pre>
+ */
+ @Nullable
+ private final Integer version;
+
private final String template;
private final Settings settings;
@@ -61,10 +84,14 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
private final ImmutableOpenMap<String, IndexMetaData.Custom> customs;
- public IndexTemplateMetaData(String name, int order, String template, Settings settings, ImmutableOpenMap<String, CompressedXContent> mappings,
- ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
+ public IndexTemplateMetaData(String name, int order, Integer version,
+ String template, Settings settings,
+ ImmutableOpenMap<String, CompressedXContent> mappings,
+ ImmutableOpenMap<String, AliasMetaData> aliases,
+ ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
this.name = name;
this.order = order;
+ this.version = version;
this.template = template;
this.settings = settings;
this.mappings = mappings;
@@ -84,6 +111,16 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
return order();
}
+ @Nullable
+ public Integer getVersion() {
+ return version();
+ }
+
+ @Nullable
+ public Integer version() {
+ return version;
+ }
+
public String getName() {
return this.name;
}
@@ -150,13 +187,14 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
if (!settings.equals(that.settings)) return false;
if (!template.equals(that.template)) return false;
- return true;
+ return Objects.equals(version, that.version);
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + order;
+ result = 31 * result + Objects.hashCode(version);
result = 31 * result + template.hashCode();
result = 31 * result + settings.hashCode();
result = 31 * result + mappings.hashCode();
@@ -184,6 +222,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
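+ // the template version field was added in 5.0.0-beta1; older nodes neither send nor expect it on the wire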
+ if (in.getVersion().onOrAfter(Version.V_5_0_0_beta1)) {
+ builder.version(in.readOptionalVInt());
+ }
return builder.build();
}
@@ -207,6 +248,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
out.writeString(cursor.key);
cursor.value.writeTo(out);
}
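+ // keep this version check in sync with the one in readFrom above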
+ if (out.getVersion().onOrAfter(Version.V_5_0_0_beta1)) {
+ out.writeOptionalVInt(version);
+ }
}
public static class Builder {
@@ -220,6 +264,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
private int order;
+ private Integer version;
+
private String template;
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
@@ -240,6 +286,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
public Builder(IndexTemplateMetaData indexTemplateMetaData) {
this.name = indexTemplateMetaData.name();
order(indexTemplateMetaData.order());
+ version(indexTemplateMetaData.version());
template(indexTemplateMetaData.template());
settings(indexTemplateMetaData.settings());
@@ -253,6 +300,11 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
return this;
}
+ public Builder version(Integer version) {
+ this.version = version;
+ return this;
+ }
+
public Builder template(String template) {
this.template = template;
return this;
@@ -312,14 +364,18 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
}
public IndexTemplateMetaData build() {
- return new IndexTemplateMetaData(name, order, template, settings, mappings.build(), aliases.build(), customs.build());
+ return new IndexTemplateMetaData(name, order, version, template, settings, mappings.build(), aliases.build(), customs.build());
}
@SuppressWarnings("unchecked")
- public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params)
+ throws IOException {
builder.startObject(indexTemplateMetaData.name());
builder.field("order", indexTemplateMetaData.order());
+ if (indexTemplateMetaData.version() != null) {
+ builder.field("version", indexTemplateMetaData.version());
+ }
builder.field("template", indexTemplateMetaData.template());
builder.startObject("settings");
@@ -380,7 +436,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
Settings.Builder templateSettingsBuilder = Settings.builder();
- templateSettingsBuilder.put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
+ templateSettingsBuilder.put(
+ SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()))
+ .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
builder.settings(templateSettingsBuilder.build());
} else if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -388,7 +446,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String mappingType = currentFieldName;
- Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
+ Map<String, Object> mappingSource =
+ MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
builder.putMapping(mappingType, XContentFactory.jsonBuilder().map(mappingSource).string());
}
}
@@ -428,6 +487,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
builder.template(parser.text());
} else if ("order".equals(currentFieldName)) {
builder.order(parser.intValue());
+ } else if ("version".equals(currentFieldName)) {
+ builder.version(parser.intValue());
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 29bb55f810..fd7e08fec3 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
@@ -38,7 +39,6 @@ import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -758,7 +758,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
/** As of 2.0 we require units for time and byte-sized settings. This methods adds default units to any cluster settings that don't
* specify a unit. */
- public static MetaData addDefaultUnitsIfNeeded(ESLogger logger, MetaData metaData) {
+ public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) {
Settings.Builder newPersistentSettings = null;
for(Map.Entry<String,String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
String settingName = ent.getKey();
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
index 04316cbc63..373c62ab11 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
@@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@@ -44,7 +46,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
@@ -102,13 +103,11 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_C
public class MetaDataCreateIndexService extends AbstractComponent {
public static final int MAX_INDEX_NAME_BYTES = 255;
- private static final DefaultIndexTemplateFilter DEFAULT_INDEX_TEMPLATE_FILTER = new DefaultIndexTemplateFilter();
private final ClusterService clusterService;
private final IndicesService indicesService;
private final AllocationService allocationService;
private final AliasValidator aliasValidator;
- private final IndexTemplateFilter indexTemplateFilter;
private final Environment env;
private final NodeServicesProvider nodeServicesProvider;
private final IndexScopedSettings indexScopedSettings;
@@ -117,8 +116,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
@Inject
public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
IndicesService indicesService, AllocationService allocationService,
- AliasValidator aliasValidator,
- Set<IndexTemplateFilter> indexTemplateFilters, Environment env,
+ AliasValidator aliasValidator, Environment env,
NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings,
ThreadPool threadPool) {
super(settings);
@@ -129,22 +127,10 @@ public class MetaDataCreateIndexService extends AbstractComponent {
this.env = env;
this.nodeServicesProvider = nodeServicesProvider;
this.indexScopedSettings = indexScopedSettings;
-
- if (indexTemplateFilters.isEmpty()) {
- this.indexTemplateFilter = DEFAULT_INDEX_TEMPLATE_FILTER;
- } else {
- IndexTemplateFilter[] templateFilters = new IndexTemplateFilter[indexTemplateFilters.size() + 1];
- templateFilters[0] = DEFAULT_INDEX_TEMPLATE_FILTER;
- int i = 1;
- for (IndexTemplateFilter indexTemplateFilter : indexTemplateFilters) {
- templateFilters[i++] = indexTemplateFilter;
- }
- this.indexTemplateFilter = new IndexTemplateFilter.Compound(templateFilters);
- }
this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
}
- public void validateIndexName(String index, ClusterState state) {
+ public static void validateIndexName(String index, ClusterState state) {
if (state.routingTable().hasIndex(index)) {
throw new IndexAlreadyExistsException(state.routingTable().index(index).getIndex());
}
@@ -157,8 +143,8 @@ public class MetaDataCreateIndexService extends AbstractComponent {
if (index.contains("#")) {
throw new InvalidIndexNameException(index, "must not contain '#'");
}
- if (index.charAt(0) == '_') {
- throw new InvalidIndexNameException(index, "must not start with '_'");
+ if (index.charAt(0) == '_' || index.charAt(0) == '-' || index.charAt(0) == '+') {
+ throw new InvalidIndexNameException(index, "must not start with '_', '-', or '+'");
}
if (!index.toLowerCase(Locale.ROOT).equals(index)) {
throw new InvalidIndexNameException(index, "must be lowercase");
@@ -242,7 +228,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// we only find a template when it's an API call (a new index)
// find templates, highest order are better matching
- List<IndexTemplateMetaData> templates = findTemplates(request, currentState, indexTemplateFilter);
+ List<IndexTemplateMetaData> templates = findTemplates(request, currentState);
Map<String, Custom> customs = new HashMap<>();
@@ -332,7 +318,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) {
indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
}
-
+ indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
final Index shrinkFromIndex = request.shrinkFrom();
int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());
@@ -443,10 +429,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
if (request.state() == State.OPEN) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable())
.addAsNew(updatedState.metaData().index(request.index()));
- RoutingAllocation.Result routingResult = allocationService.reroute(
+ updatedState = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
"index [" + request.index() + "] created");
- updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
}
removalReason = "cleaning up after validating index on master";
return updatedState;
@@ -461,20 +446,20 @@ public class MetaDataCreateIndexService extends AbstractComponent {
@Override
public void onFailure(String source, Exception e) {
if (e instanceof IndexAlreadyExistsException) {
- logger.trace("[{}] failed to create", e, request.index());
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
} else {
- logger.debug("[{}] failed to create", e, request.index());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
}
super.onFailure(source, e);
}
});
}
- private List<IndexTemplateMetaData> findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state, IndexTemplateFilter indexTemplateFilter) throws IOException {
+ private List<IndexTemplateMetaData> findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws IOException {
List<IndexTemplateMetaData> templates = new ArrayList<>();
for (ObjectCursor<IndexTemplateMetaData> cursor : state.metaData().templates().values()) {
IndexTemplateMetaData template = cursor.value;
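+ // a template applies when its pattern (e.g. "logs-*") matches the name of the index being created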
- if (indexTemplateFilter.apply(request, template)) {
+ if (Regex.simpleMatch(template.template(), request.index())) {
templates.add(template);
}
}
@@ -513,25 +498,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
}
}
- //norelease - this can be removed?
- Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
- Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
- if (number_of_primaries != null && number_of_primaries <= 0) {
- validationErrors.add("index must have 1 or more primary shards");
- }
- if (number_of_replicas != null && number_of_replicas < 0) {
- validationErrors.add("index must have 0 or more replica shards");
- }
return validationErrors;
}
- private static class DefaultIndexTemplateFilter implements IndexTemplateFilter {
- @Override
- public boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template) {
- return Regex.simpleMatch(template.template(), request.index());
- }
- }
-
/**
* Validates the settings and mappings for shrinking an index.
* @return the list of nodes at least one instance of the source index shards are allocated
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
index 7e97d79243..22553dd992 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
@@ -37,11 +36,13 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.snapshots.SnapshotsService;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Set;
-import java.util.stream.Collectors;
+
+import static java.util.stream.Collectors.toSet;
/**
- *
+ * Deletes indices.
*/
public class MetaDataDeleteIndexService extends AbstractComponent {
@@ -56,7 +57,8 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
this.allocationService = allocationService;
}
- public void deleteIndices(final DeleteIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
+ public void deleteIndices(final DeleteIndexClusterStateUpdateRequest request,
+ final ActionListener<ClusterStateUpdateResponse> listener) {
if (request.indices() == null || request.indices().length == 0) {
throw new IllegalArgumentException("Index name is required");
}
@@ -71,37 +73,46 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
@Override
public ClusterState execute(final ClusterState currentState) {
- final MetaData meta = currentState.metaData();
- final Index[] indices = request.indices();
- final Set<IndexMetaData> metaDatas = Arrays.asList(indices).stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet());
- // Check if index deletion conflicts with any running snapshots
- SnapshotsService.checkIndexDeletion(currentState, metaDatas);
- RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
- MetaData.Builder metaDataBuilder = MetaData.builder(meta);
- ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
-
- final IndexGraveyard.Builder graveyardBuilder = IndexGraveyard.builder(metaDataBuilder.indexGraveyard());
- final int previousGraveyardSize = graveyardBuilder.tombstones().size();
- for (final Index index : indices) {
- String indexName = index.getName();
- logger.debug("[{}] deleting index", index);
- routingTableBuilder.remove(indexName);
- clusterBlocksBuilder.removeIndexBlocks(indexName);
- metaDataBuilder.remove(indexName);
- }
- // add tombstones to the cluster state for each deleted index
- final IndexGraveyard currentGraveyard = graveyardBuilder.addTombstones(indices).build(settings);
- metaDataBuilder.indexGraveyard(currentGraveyard); // the new graveyard set on the metadata
- logger.trace("{} tombstones purged from the cluster state. Previous tombstone size: {}. Current tombstone size: {}.",
- graveyardBuilder.getNumPurged(), previousGraveyardSize, currentGraveyard.getTombstones().size());
-
- MetaData newMetaData = metaDataBuilder.build();
- ClusterBlocks blocks = clusterBlocksBuilder.build();
- RoutingAllocation.Result routingResult = allocationService.reroute(
- ClusterState.builder(currentState).routingTable(routingTableBuilder.build()).metaData(newMetaData).build(),
- "deleted indices [" + indices + "]");
- return ClusterState.builder(currentState).routingResult(routingResult).metaData(newMetaData).blocks(blocks).build();
+ return deleteIndices(currentState, Arrays.asList(request.indices()));
}
});
}
+
+ /**
+ * Deletes the given indices from the cluster state, recording tombstones in the index graveyard
+ * and rerouting the remaining shards.
+ */
+ public ClusterState deleteIndices(ClusterState currentState, Collection<Index> indices) {
+ final MetaData meta = currentState.metaData();
+ final Set<IndexMetaData> metaDatas = indices.stream().map(i -> meta.getIndexSafe(i)).collect(toSet());
+ // Check if index deletion conflicts with any running snapshots
+ SnapshotsService.checkIndexDeletion(currentState, metaDatas);
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
+ MetaData.Builder metaDataBuilder = MetaData.builder(meta);
+ ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
+
+ final IndexGraveyard.Builder graveyardBuilder = IndexGraveyard.builder(metaDataBuilder.indexGraveyard());
+ final int previousGraveyardSize = graveyardBuilder.tombstones().size();
+ for (final Index index : indices) {
+ String indexName = index.getName();
+ logger.debug("[{}] deleting index", index);
+ routingTableBuilder.remove(indexName);
+ clusterBlocksBuilder.removeIndexBlocks(indexName);
+ metaDataBuilder.remove(indexName);
+ }
+ // add tombstones to the cluster state for each deleted index
+ final IndexGraveyard currentGraveyard = graveyardBuilder.addTombstones(indices).build(settings);
+ metaDataBuilder.indexGraveyard(currentGraveyard); // the new graveyard set on the metadata
+ logger.trace("{} tombstones purged from the cluster state. Previous tombstone size: {}. Current tombstone size: {}.",
+ graveyardBuilder.getNumPurged(), previousGraveyardSize, currentGraveyard.getTombstones().size());
+
+ MetaData newMetaData = metaDataBuilder.build();
+ ClusterBlocks blocks = clusterBlocksBuilder.build();
+ return allocationService.reroute(
+ ClusterState.builder(currentState)
+ .routingTable(routingTableBuilder.build())
+ .metaData(newMetaData)
+ .blocks(blocks)
+ .build(),
+ "deleted indices [" + indices + "]");
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
index e39b86a161..c21454a09a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
@@ -20,11 +20,13 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.metadata.AliasAction.NewAliasValidator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
@@ -38,11 +40,16 @@ import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndicesService;
+import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+
+import static java.util.Collections.emptyList;
/**
* Service responsible for submitting add and remove aliases requests
@@ -57,108 +64,113 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
private final NodeServicesProvider nodeServicesProvider;
+ private final MetaDataDeleteIndexService deleteIndexService;
+
@Inject
- public MetaDataIndexAliasesService(Settings settings, ClusterService clusterService, IndicesService indicesService, AliasValidator aliasValidator, NodeServicesProvider nodeServicesProvider) {
+ public MetaDataIndexAliasesService(Settings settings, ClusterService clusterService, IndicesService indicesService,
+ AliasValidator aliasValidator, NodeServicesProvider nodeServicesProvider, MetaDataDeleteIndexService deleteIndexService) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
this.aliasValidator = aliasValidator;
this.nodeServicesProvider = nodeServicesProvider;
+ this.deleteIndexService = deleteIndexService;
}
- public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
- clusterService.submitStateUpdateTask("index-aliases", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
+ public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request,
+ final ActionListener<ClusterStateUpdateResponse> listener) {
+ clusterService.submitStateUpdateTask("index-aliases",
+ new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged);
}
@Override
- public ClusterState execute(final ClusterState currentState) {
- List<Index> indicesToClose = new ArrayList<>();
- Map<String, IndexService> indices = new HashMap<>();
- try {
- for (AliasAction aliasAction : request.actions()) {
- aliasValidator.validateAliasAction(aliasAction, currentState.metaData());
- if (!currentState.metaData().hasIndex(aliasAction.index())) {
- throw new IndexNotFoundException(aliasAction.index());
- }
- }
+ public ClusterState execute(ClusterState currentState) {
+ return innerExecute(currentState, request.actions());
+ }
+ });
+ }
- boolean changed = false;
- MetaData.Builder builder = MetaData.builder(currentState.metaData());
- for (AliasAction aliasAction : request.actions()) {
- IndexMetaData indexMetaData = builder.get(aliasAction.index());
- if (indexMetaData == null) {
- throw new IndexNotFoundException(aliasAction.index());
- }
- // TODO: not copy (putAll)
- IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
- if (aliasAction.actionType() == AliasAction.Type.ADD) {
- String filter = aliasAction.filter();
- if (Strings.hasLength(filter)) {
- // parse the filter, in order to validate it
- IndexService indexService = indices.get(indexMetaData.getIndex());
- if (indexService == null) {
- indexService = indicesService.indexService(indexMetaData.getIndex());
- if (indexService == null) {
- // temporarily create the index and add mappings so we can parse the filter
- try {
- indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
- for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
- MappingMetaData mappingMetaData = cursor.value;
- indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
- }
- } catch (Exception e) {
- logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());
- continue;
- }
- indicesToClose.add(indexMetaData.getIndex());
- }
- indices.put(indexMetaData.getIndex().getName(), indexService);
+ ClusterState innerExecute(ClusterState currentState, Iterable<AliasAction> actions) {
+ List<Index> indicesToClose = new ArrayList<>();
+ Map<String, IndexService> indices = new HashMap<>();
+ try {
+ boolean changed = false;
+ // Gather all the indices that must be removed first so:
+ // 1. We don't cause an error when attempting to replace an index with an alias of the same name.
+ // 2. We don't allow removal of aliases from indices that we're just going to delete anyway. That'd be silly.
+ Set<Index> indicesToDelete = new HashSet<>();
+ for (AliasAction action : actions) {
+ if (action.removeIndex()) {
+ IndexMetaData index = currentState.metaData().getIndices().get(action.getIndex());
+ if (index == null) {
+ throw new IndexNotFoundException(action.getIndex());
+ }
+ indicesToDelete.add(index.getIndex());
+ changed = true;
+ }
+ }
+ // Remove the indices if there are any to remove
+ if (changed) {
+ currentState = deleteIndexService.deleteIndices(currentState, indicesToDelete);
+ }
+ MetaData.Builder metadata = MetaData.builder(currentState.metaData());
+ // Run the remaining alias actions
+ for (AliasAction action : actions) {
+ if (action.removeIndex()) {
+ // Handled above
+ continue;
+ }
+ IndexMetaData index = metadata.get(action.getIndex());
+ if (index == null) {
+ throw new IndexNotFoundException(action.getIndex());
+ }
+ NewAliasValidator newAliasValidator = (alias, indexRouting, filter) -> {
+ /* It is important that we look up the index using the metadata builder we are modifying so we can remove an
+ * index and replace it with an alias. */
+ Function<String, IndexMetaData> indexLookup = name -> metadata.get(name);
+ aliasValidator.validateAlias(alias, action.getIndex(), indexRouting, indexLookup);
+ if (Strings.hasLength(filter)) {
+ IndexService indexService = indices.get(index.getIndex());
+ if (indexService == null) {
+ indexService = indicesService.indexService(index.getIndex());
+ if (indexService == null) {
+ // temporarily create the index and add mappings so we can parse the filter
+ try {
+ indexService = indicesService.createIndex(nodeServicesProvider, index, emptyList());
+ } catch (IOException e) {
+ throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e);
}
-
- aliasValidator.validateAliasFilter(aliasAction.alias(), filter, indexService.newQueryShardContext());
- }
- AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(
- aliasAction.alias())
- .filter(filter)
- .indexRouting(aliasAction.indexRouting())
- .searchRouting(aliasAction.searchRouting())
- .build();
- // Check if this alias already exists
- AliasMetaData aliasMd = indexMetaData.getAliases().get(aliasAction.alias());
- if (aliasMd != null && aliasMd.equals(newAliasMd)) {
- // It's the same alias - ignore it
- continue;
- }
- indexMetaDataBuilder.putAlias(newAliasMd);
- } else if (aliasAction.actionType() == AliasAction.Type.REMOVE) {
- if (!indexMetaData.getAliases().containsKey(aliasAction.alias())) {
- // This alias doesn't exist - ignore
- continue;
+ for (ObjectCursor<MappingMetaData> cursor : index.getMappings().values()) {
+ MappingMetaData mappingMetaData = cursor.value;
+ indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(),
+ MapperService.MergeReason.MAPPING_RECOVERY, false);
+ }
+ indicesToClose.add(index.getIndex());
}
- indexMetaDataBuilder.removeAlias(aliasAction.alias());
+ indices.put(action.getIndex(), indexService);
}
- changed = true;
- builder.put(indexMetaDataBuilder);
+ aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext());
}
+ };
+ changed |= action.apply(newAliasValidator, metadata, index);
+ }
- if (changed) {
- ClusterState updatedState = ClusterState.builder(currentState).metaData(builder).build();
- // even though changes happened, they resulted in 0 actual changes to metadata
- // i.e. remove and add the same alias to the same index
- if (!updatedState.metaData().equalsAliases(currentState.metaData())) {
- return updatedState;
- }
- }
- return currentState;
- } finally {
- for (Index index : indicesToClose) {
- indicesService.removeIndex(index, "created for alias processing");
- }
+ if (changed) {
+ ClusterState updatedState = ClusterState.builder(currentState).metaData(metadata).build();
+ // even though changes happened, they may have resulted in 0 actual changes to the metadata,
+ // e.g. removing and re-adding the same alias on the same index
+ if (!updatedState.metaData().equalsAliases(currentState.metaData())) {
+ return updatedState;
}
}
- });
+ return currentState;
+ } finally {
+ for (Index index : indicesToClose) {
+ indicesService.removeIndex(index, "created for alias processing");
+ }
+ }
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
index 53a0ede809..fd7c34dbe6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -31,7 +31,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
@@ -125,11 +124,10 @@ public class MetaDataIndexStateService extends AbstractComponent {
rtBuilder.remove(index.getIndex().getName());
}
- RoutingAllocation.Result routingResult = allocationService.reroute(
+ // no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
+ return allocationService.reroute(
ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
"indices closed [" + indicesAsString + "]");
- //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
- return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
});
}
@@ -188,11 +186,10 @@ public class MetaDataIndexStateService extends AbstractComponent {
rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex()));
}
- RoutingAllocation.Result routingResult = allocationService.reroute(
+ // no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
+ return allocationService.reroute(
ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
"indices opened [" + indicesAsString + "]");
- //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
- return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
});
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
index 101f59f3ae..4ffcf33097 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
@@ -32,6 +32,7 @@ import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
@@ -63,15 +64,21 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
private final IndicesService indicesService;
private final MetaDataCreateIndexService metaDataCreateIndexService;
private final NodeServicesProvider nodeServicesProvider;
+ private final IndexScopedSettings indexScopedSettings;
@Inject
- public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
+ public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService,
+ MetaDataCreateIndexService metaDataCreateIndexService,
+ AliasValidator aliasValidator, IndicesService indicesService,
+ NodeServicesProvider nodeServicesProvider,
+ IndexScopedSettings indexScopedSettings) {
super(settings);
this.clusterService = clusterService;
this.aliasValidator = aliasValidator;
this.indicesService = indicesService;
this.metaDataCreateIndexService = metaDataCreateIndexService;
this.nodeServicesProvider = nodeServicesProvider;
+ this.indexScopedSettings = indexScopedSettings;
}
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
@@ -204,6 +211,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
createdIndex = dummyIndexService.index();
templateBuilder.order(request.order);
+ templateBuilder.version(request.version);
templateBuilder.template(request.template);
templateBuilder.settings(request.settings);
@@ -259,6 +267,14 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
validationErrors.add("template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
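+ // validate the template's index-scoped settings up front and collect every violation,
+ // including suppressed ones, instead of failing on the first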
+ try {
+ indexScopedSettings.validate(request.settings);
+ } catch (IllegalArgumentException iae) {
+ validationErrors.add(iae.getMessage());
+ for (Throwable t : iae.getSuppressed()) {
+ validationErrors.add(t.getMessage());
+ }
+ }
List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
validationErrors.addAll(indexSettingsValidation);
if (!validationErrors.isEmpty()) {
@@ -288,6 +304,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
final String cause;
boolean create;
int order;
+ Integer version;
String template;
Settings settings = Settings.Builder.EMPTY_SETTINGS;
Map<String, String> mappings = new HashMap<>();
@@ -345,6 +362,11 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
this.masterTimeout = masterTimeout;
return this;
}
+
+ public PutRequest version(Integer version) {
+ this.version = version;
+ return this;
+ }
}
public static class PutResponse {
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
index d1141aeb9f..d9faa52068 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
@@ -26,13 +26,16 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
+import java.util.AbstractMap;
import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
/**
* This service is responsible for upgrading legacy index metadata to the current version
@@ -100,16 +103,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
* Returns true if this index can be supported by the current version of elasticsearch
*/
private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
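+ // only indices created on or after 2.0.0-beta1 (Lucene 5.x segments) can be read; older ones must be reindexed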
- if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
- // The index was created with elasticsearch that was using Lucene 5.2.1
- return true;
- }
- if (indexMetaData.getMinimumCompatibleVersion() != null &&
- indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_5_0_0)) {
- //The index was upgraded we can work with it
- return true;
- }
- return false;
+ return indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1);
}
/**
@@ -121,9 +115,30 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
+ final NamedAnalyzer fakeDefault = new NamedAnalyzer("fake_default", new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ throw new UnsupportedOperationException("shouldn't be here");
+ }
+ });
+ // this is just a fake map that always returns the same value for any possible string key
+ // the entrySet implementation isn't fully correct either, but we implement it because
+ // IndexAnalyzers internally iterates over all analyzers in order to close them.
+ final Map<String, NamedAnalyzer> analyzerMap = new AbstractMap<String, NamedAnalyzer>() {
+ @Override
+ public NamedAnalyzer get(Object key) {
+ assert key instanceof String : "key must be a string but was: " + key.getClass();
+ return new NamedAnalyzer((String)key, fakeDefault.analyzer());
+ }
- try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
- MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null);
+ @Override
+ public Set<Entry<String, NamedAnalyzer>> entrySet() {
+ // just to ensure we can iterate over this single analyzer
+ return Collections.singletonMap(fakeDefault.name(), fakeDefault).entrySet();
+ }
+ };
+ try (IndexAnalyzers fakeIndexAnalyzers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap)) {
+ MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalyzers, similarityService, mapperRegistry, () -> null);
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
@@ -143,34 +158,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
return IndexMetaData.builder(indexMetaData).settings(settings).build();
}
- /**
- * A fake analysis server that returns the same keyword analyzer for all requests
- */
- private static class FakeAnalysisService extends AnalysisService {
-
- private Analyzer fakeAnalyzer = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName) {
- throw new UnsupportedOperationException("shouldn't be here");
- }
- };
-
- public FakeAnalysisService(IndexSettings indexSettings) {
- super(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
- }
-
- @Override
- public NamedAnalyzer analyzer(String name) {
- return new NamedAnalyzer(name, fakeAnalyzer);
- }
-
- @Override
- public void close() {
- fakeAnalyzer.close();
- super.close();
- }
- }
-
IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) {
final Settings settings = indexMetaData.getSettings();
final Settings upgrade = indexScopedSettings.archiveUnknownOrBrokenSettings(settings);
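
The fake analyzer map above leans on a small but useful pattern: an AbstractMap whose get() synthesizes a value for any key, while entrySet() exposes a single canonical entry so callers that iterate (here, IndexAnalyzers closing its analyzers) still work. A minimal, self-contained sketch of the same pattern with plain JDK types, not the real Elasticsearch classes:

    import java.util.AbstractMap;
    import java.util.Collections;
    import java.util.Map;
    import java.util.Set;

    public final class ConstantAnswerMap {
        public static void main(String[] args) {
            final String fallback = "fake_default";
            Map<String, String> map = new AbstractMap<String, String>() {
                @Override
                public String get(Object key) {
                    // every lookup succeeds; the value is synthesized from the key
                    return key + " -> " + fallback;
                }

                @Override
                public Set<Entry<String, String>> entrySet() {
                    // iteration only ever sees the one canonical entry
                    return Collections.singletonMap(fallback, fallback).entrySet();
                }
            };
            System.out.println(map.get("standard")); // standard -> fake_default
            System.out.println(map.entrySet());      // [fake_default=fake_default]
        }
    }
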
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index c44fee0fb2..8ce58637b1 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -20,6 +20,8 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@@ -193,7 +195,7 @@ public class MetaDataMappingService extends AbstractComponent {
}
}
} catch (Exception e) {
- logger.warn("[{}] failed to refresh-mapping in cluster state", e, index);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
}
return dirty;
}
@@ -207,7 +209,7 @@ public class MetaDataMappingService extends AbstractComponent {
refreshTask,
ClusterStateTaskConfig.build(Priority.HIGH),
refreshExecutor,
- (source, e) -> logger.warn("failure during [{}]", e, source)
+ (source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
);
}
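
The logging changes above (and the matching ones in RoutingService below) switch to the Log4j 2 idiom where the exception stays the last argument and the formatted message is built lazily. A minimal sketch, assuming log4j-api 2.x on the classpath; the Supplier cast selects the warn(Supplier<?>, Throwable) overload so the ParameterizedMessage is only constructed if WARN is enabled:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    public final class LazyWarnExample {
        private static final Logger logger = LogManager.getLogger(LazyWarnExample.class);

        public static void main(String[] args) {
            String index = "my-index";
            Exception e = new IllegalStateException("boom");
            // message construction is deferred; visible output depends on the Log4j configuration
            logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                "[{}] failed to refresh-mapping in cluster state", index), e);
        }
    }
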
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
index 9db777a479..bd8b09e8e1 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
@@ -33,7 +33,6 @@ import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
@@ -228,6 +227,9 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
int updatedNumberOfReplicas = openSettings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, -1);
if (updatedNumberOfReplicas != -1 && preserveExisting == false) {
+ // we do *not* update the in-sync allocation ids as they will be removed upon the first index
+ // operation, which would make these copies stale
+ // TODO: update the list once the data is deleted by the node?
routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
metaDataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices);
@@ -271,8 +273,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder).routingTable(routingTableBuilder.build()).blocks(blocks).build();
// now, reroute in case things change that require it (like number of replicas)
- RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
- updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
+ updatedState = allocationService.reroute(updatedState, "settings update");
try {
for (Index index : openIndices) {
final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index);
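
The same simplification recurs in DelayedAllocationService and RoutingService below: AllocationService#reroute now returns the resulting ClusterState directly instead of a RoutingAllocation.Result. A sketch of the implied contract with stand-in types, not the real classes; the old routingResult.changed() test becomes a reference comparison, assuming reroute hands back the same instance when nothing changed:

    public final class RerouteContractSketch {
        static final class ClusterState { }

        static final class AllocationService {
            ClusterState reroute(ClusterState current, String reason) {
                boolean changed = false; // real allocation logic elided
                return changed ? new ClusterState() : current; // unchanged -> same instance
            }
        }

        public static void main(String[] args) {
            AllocationService allocationService = new AllocationService();
            ClusterState currentState = new ClusterState();
            ClusterState updated = allocationService.reroute(currentState, "settings update");
            System.out.println("changed: " + (updated != currentState)); // changed: false
        }
    }
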
diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
index 0c39c43bc9..55be77d201 100644
--- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
+++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
@@ -96,7 +96,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
* @param version the version of the node
*/
public DiscoveryNode(final String id, TransportAddress address, Version version) {
- this(id, address, Collections.emptyMap(), Collections.emptySet(), version);
+ this(id, address, Collections.emptyMap(), EnumSet.allOf(Role.class), version);
}
/**
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java
index 29d74dd893..55c6750b82 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java
@@ -106,12 +106,7 @@ public class DelayedAllocationService extends AbstractLifecycleComponent impleme
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
removeIfSameTask(this);
- RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "assign delayed unassigned shards");
- if (routingResult.changed()) {
- return ClusterState.builder(currentState).routingResult(routingResult).build();
- } else {
- return currentState;
- }
+ return allocationService.reroute(currentState, "assign delayed unassigned shards");
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
index 0fe4936917..58ee6d70f2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
@@ -23,9 +23,14 @@ import com.carrotsearch.hppc.IntSet;
import com.carrotsearch.hppc.cursors.IntCursor;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -128,6 +133,22 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
throw new IllegalStateException("shard routing has an index [" + shardRouting.index() + "] that is different " +
"from the routing table");
}
+ final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(shardRouting.id());
+ if (shardRouting.active() &&
+ inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) {
+ throw new IllegalStateException("active shard routing " + shardRouting + " has no corresponding entry in the in-sync " +
+ "allocation set " + inSyncAllocationIds);
+ }
+
+ if (indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1) &&
+ IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) == false && // see #20650
+ shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false &&
+ RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) == false &&
+ inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false)
+ throw new IllegalStateException("primary shard routing " + shardRouting + " is recovering from " +
+ "a known allocation id but has no corresponding entry in the in-sync " +
+ "allocation set " + inSyncAllocationIds);
+
}
}
return true;
@@ -352,7 +373,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
}
/**
- * Initializes a new empty index, as if it was created from an API.
+ * Initializes an existing index.
*/
public Builder initializeAsRecovery(IndexMetaData indexMetaData) {
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null));
@@ -375,27 +396,27 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
/**
* Initializes a new empty index, to be restored from a snapshot
*/
- public Builder initializeAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards) {
+ public Builder initializeAsNewRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards) {
final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED,
- "restore_source[" + restoreSource.snapshot().getRepository() + "/" +
- restoreSource.snapshot().getSnapshotId().getName() + "]");
- return initializeAsRestore(indexMetaData, restoreSource, ignoreShards, true, unassignedInfo);
+ "restore_source[" + recoverySource.snapshot().getRepository() + "/" +
+ recoverySource.snapshot().getSnapshotId().getName() + "]");
+ return initializeAsRestore(indexMetaData, recoverySource, ignoreShards, true, unassignedInfo);
}
/**
* Initializes an existing index, to be restored from a snapshot
*/
- public Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+ public Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource) {
final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED,
- "restore_source[" + restoreSource.snapshot().getRepository() + "/" +
- restoreSource.snapshot().getSnapshotId().getName() + "]");
- return initializeAsRestore(indexMetaData, restoreSource, null, false, unassignedInfo);
+ "restore_source[" + recoverySource.snapshot().getRepository() + "/" +
+ recoverySource.snapshot().getSnapshotId().getName() + "]");
+ return initializeAsRestore(indexMetaData, recoverySource, null, false, unassignedInfo);
}
/**
* Initializes an index, to be restored from snapshot
*/
- private Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards, boolean asNew, UnassignedInfo unassignedInfo) {
+ private Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards, boolean asNew, UnassignedInfo unassignedInfo) {
assert indexMetaData.getIndex().equals(index);
if (!shards.isEmpty()) {
throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
@@ -404,11 +425,14 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
ShardId shardId = new ShardId(index, shardNumber);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
+ boolean primary = i == 0;
if (asNew && ignoreShards.contains(shardNumber)) {
// This shard wasn't completely snapshotted - restore it as a new shard
- indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo));
+ indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
+ primary ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE, unassignedInfo));
} else {
- indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, i == 0 ? restoreSource : null, i == 0, unassignedInfo));
+ indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
+ primary ? recoverySource : PeerRecoverySource.INSTANCE, unassignedInfo));
}
}
shards.put(shardNumber, indexShardRoutingBuilder.build());
@@ -426,9 +450,28 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
}
for (int shardNumber = 0; shardNumber < indexMetaData.getNumberOfShards(); shardNumber++) {
ShardId shardId = new ShardId(index, shardNumber);
+ final RecoverySource primaryRecoverySource;
+ if (indexMetaData.inSyncAllocationIds(shardNumber).isEmpty() == false) {
+ // we have previous valid copies for this shard. use them for recovery
+ primaryRecoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
+ } else if (indexMetaData.getCreationVersion().before(Version.V_5_0_0_alpha1) &&
+ unassignedInfo.getReason() != UnassignedInfo.Reason.INDEX_CREATED // tests can create old indices
+ ) {
+ // the index is old and didn't maintain inSyncAllocationIds. Fall back to old behavior and require
+ // finding existing copies
+ primaryRecoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
+ } else if (indexMetaData.getMergeSourceIndex() != null) {
+ // this is a new index but the initial shards should be merged from another index
+ primaryRecoverySource = LocalShardsRecoverySource.INSTANCE;
+ } else {
+ // a freshly created index with no restriction
+ primaryRecoverySource = StoreRecoverySource.EMPTY_STORE_INSTANCE;
+ }
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
- indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo));
+ boolean primary = i == 0;
+ indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
+ primary ? primaryRecoverySource : PeerRecoverySource.INSTANCE, unassignedInfo));
}
shards.put(shardNumber, indexShardRoutingBuilder.build());
}
@@ -440,7 +483,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
int shardNumber = cursor.value;
ShardId shardId = new ShardId(index, shardNumber);
// version 0, will get updated when reroute happens
- ShardRouting shard = ShardRouting.newUnassigned(shardId, null, false, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
+ ShardRouting shard = ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
shards.put(shardNumber,
new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()
);
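
The branch ladder added to initializeEmpty above decides where a new primary may recover from. A condensed, runnable sketch of the same precedence with stand-in flags instead of IndexMetaData; the order of the checks is what matters:

    public final class PrimaryRecoverySourceSketch {
        enum Source { EXISTING_STORE, LOCAL_SHARDS, EMPTY_STORE }

        static Source choose(boolean hasInSyncIds, boolean legacyIndexNeedingExistingCopy, boolean hasMergeSourceIndex) {
            if (hasInSyncIds) {
                return Source.EXISTING_STORE;  // previous valid copies exist; recover from them
            } else if (legacyIndexNeedingExistingCopy) {
                return Source.EXISTING_STORE;  // pre-5.0 index without in-sync ids: require existing copies
            } else if (hasMergeSourceIndex) {
                return Source.LOCAL_SHARDS;    // shrink: initial shards come from another index on the node
            } else {
                return Source.EMPTY_STORE;     // freshly created index, no restriction
            }
        }

        public static void main(String[] args) {
            System.out.println(choose(false, false, true)); // LOCAL_SHARDS
        }
    }
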
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java b/core/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java
new file mode 100644
index 0000000000..f613cdbbad
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.snapshots.Snapshot;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Objects;
+
+/**
+ * Represents the recovery source of a shard. Available recovery types are:
+ *
+ * - {@link StoreRecoverySource} recovery from the local store (empty or with existing data)
+ * - {@link PeerRecoverySource} recovery from a primary on another node
+ * - {@link SnapshotRecoverySource} recovery from a snapshot
+ * - {@link LocalShardsRecoverySource} recovery from other shards of another index on the same node
+ */
+public abstract class RecoverySource implements Writeable, ToXContent {
+
+ @Override
+ public final XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field("type", getType());
+ addAdditionalFields(builder, params);
+ return builder.endObject();
+ }
+
+ /**
+ * to be overridden by subclasses
+ */
+ public void addAdditionalFields(XContentBuilder builder, ToXContent.Params params) throws IOException {
+
+ }
+
+ public static RecoverySource readFrom(StreamInput in) throws IOException {
+ Type type = Type.values()[in.readByte()];
+ switch (type) {
+ case EMPTY_STORE: return StoreRecoverySource.EMPTY_STORE_INSTANCE;
+ case EXISTING_STORE: return StoreRecoverySource.EXISTING_STORE_INSTANCE;
+ case PEER: return PeerRecoverySource.INSTANCE;
+ case SNAPSHOT: return new SnapshotRecoverySource(in);
+ case LOCAL_SHARDS: return LocalShardsRecoverySource.INSTANCE;
+ default: throw new IllegalArgumentException("unknown recovery type: " + type.name());
+ }
+ }
+
+ @Override
+ public final void writeTo(StreamOutput out) throws IOException {
+ out.writeByte((byte) getType().ordinal());
+ writeAdditionalFields(out);
+ }
+
+ /**
+ * to be overridden by subclasses
+ */
+ protected void writeAdditionalFields(StreamOutput out) throws IOException {
+
+ }
+
+ public enum Type {
+ EMPTY_STORE,
+ EXISTING_STORE,
+ PEER,
+ SNAPSHOT,
+ LOCAL_SHARDS
+ }
+
+ public abstract Type getType();
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RecoverySource that = (RecoverySource) o;
+
+ return getType() == that.getType();
+ }
+
+ @Override
+ public int hashCode() {
+ return getType().hashCode();
+ }
+
+ /**
+ * recovery from an existing on-disk store or a fresh copy
+ */
+ public abstract static class StoreRecoverySource extends RecoverySource {
+ public static final StoreRecoverySource EMPTY_STORE_INSTANCE = new StoreRecoverySource() {
+ @Override
+ public Type getType() {
+ return Type.EMPTY_STORE;
+ }
+ };
+ public static final StoreRecoverySource EXISTING_STORE_INSTANCE = new StoreRecoverySource() {
+ @Override
+ public Type getType() {
+ return Type.EXISTING_STORE;
+ }
+ };
+
+ @Override
+ public String toString() {
+ return getType() == Type.EMPTY_STORE ? "new shard recovery" : "existing recovery";
+ }
+ }
+
+ /**
+ * recovery from other shards on same node (shrink index action)
+ */
+ public static class LocalShardsRecoverySource extends RecoverySource {
+
+ public static final LocalShardsRecoverySource INSTANCE = new LocalShardsRecoverySource();
+
+ private LocalShardsRecoverySource() {
+ }
+
+ @Override
+ public Type getType() {
+ return Type.LOCAL_SHARDS;
+ }
+
+ @Override
+ public String toString() {
+ return "local shards recovery";
+ }
+
+ }
+
+ /**
+ * recovery from a snapshot
+ */
+ public static class SnapshotRecoverySource extends RecoverySource {
+ private final Snapshot snapshot;
+ private final String index;
+ private final Version version;
+
+ public SnapshotRecoverySource(Snapshot snapshot, Version version, String index) {
+ this.snapshot = Objects.requireNonNull(snapshot);
+ this.version = Objects.requireNonNull(version);
+ this.index = Objects.requireNonNull(index);
+ }
+
+ SnapshotRecoverySource(StreamInput in) throws IOException {
+ snapshot = new Snapshot(in);
+ version = Version.readVersion(in);
+ index = in.readString();
+ }
+
+ public Snapshot snapshot() {
+ return snapshot;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public Version version() {
+ return version;
+ }
+
+ @Override
+ protected void writeAdditionalFields(StreamOutput out) throws IOException {
+ snapshot.writeTo(out);
+ Version.writeVersion(version, out);
+ out.writeString(index);
+ }
+
+ @Override
+ public Type getType() {
+ return Type.SNAPSHOT;
+ }
+
+ @Override
+ public void addAdditionalFields(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.field("repository", snapshot.getRepository())
+ .field("snapshot", snapshot.getSnapshotId().getName())
+ .field("version", version.toString())
+ .field("index", index);
+ }
+
+ @Override
+ public String toString() {
+ return "snapshot recovery from " + snapshot.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ SnapshotRecoverySource that = (SnapshotRecoverySource) o;
+ return snapshot.equals(that.snapshot) && index.equals(that.index) && version.equals(that.version);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(snapshot, index, version);
+ }
+
+ }
+
+ /**
+ * peer recovery from a primary shard
+ */
+ public static class PeerRecoverySource extends RecoverySource {
+
+ public static final PeerRecoverySource INSTANCE = new PeerRecoverySource();
+
+ private PeerRecoverySource() {
+ }
+
+ @Override
+ public Type getType() {
+ return Type.PEER;
+ }
+
+ @Override
+ public String toString() {
+ return "peer recovery";
+ }
+ }
+
+ private static final EnumSet<RecoverySource.Type> INITIAL_RECOVERY_TYPES = EnumSet.of(Type.EMPTY_STORE, Type.LOCAL_SHARDS, Type.SNAPSHOT);
+
+ /**
+ * Returns true for recovery types that indicate that a primary is being allocated for the very first time.
+ * These recoveries can be controlled by {@link IndexMetaData#INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING}
+ */
+ public static boolean isInitialRecovery(RecoverySource.Type type) {
+ return INITIAL_RECOVERY_TYPES.contains(type);
+ }
+}
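
RecoverySource puts the Type on the wire as a single ordinal byte followed by subclass-specific fields, and readFrom dispatches on that byte. A plain-JDK sketch of the same framing, with DataOutputStream/DataInputStream standing in for StreamOutput/StreamInput:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class RecoveryTypeWireSketch {
        enum Type { EMPTY_STORE, EXISTING_STORE, PEER, SNAPSHOT, LOCAL_SHARDS }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeByte((byte) Type.SNAPSHOT.ordinal()); // type tag first
                out.writeUTF("my-snapshot");                   // then subclass fields
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                Type type = Type.values()[in.readByte()];      // dispatch on the tag
                System.out.println(type + " / " + in.readUTF()); // SNAPSHOT / my-snapshot
            }
        }
    }

One consequence of ordinal-based tagging is that the enum order becomes part of the wire format: new types can only be appended, never inserted or reordered.
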
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java b/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java
deleted file mode 100644
index f80e55ed8b..0000000000
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.cluster.routing;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.snapshots.Snapshot;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-
-import java.io.IOException;
-import java.util.Objects;
-
-/**
- * Represents snapshot and index from which a recovering index should be restored
- */
-public class RestoreSource implements Streamable, ToXContent {
-
- private Snapshot snapshot;
-
- private String index;
-
- private Version version;
-
- RestoreSource() {
- }
-
- public RestoreSource(Snapshot snapshot, Version version, String index) {
- this.snapshot = Objects.requireNonNull(snapshot);
- this.version = Objects.requireNonNull(version);
- this.index = Objects.requireNonNull(index);
- }
-
- public Snapshot snapshot() {
- return snapshot;
- }
-
- public String index() {
- return index;
- }
-
- public Version version() {
- return version;
- }
-
- public static RestoreSource readOptionalRestoreSource(StreamInput in) throws IOException {
- return in.readOptionalStreamable(RestoreSource::new);
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- snapshot = new Snapshot(in);
- version = Version.readVersion(in);
- index = in.readString();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- snapshot.writeTo(out);
- Version.writeVersion(version, out);
- out.writeString(index);
- }
-
- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- return builder.startObject()
- .field("repository", snapshot.getRepository())
- .field("snapshot", snapshot.getSnapshotId().getName())
- .field("version", version.toString())
- .field("index", index)
- .endObject();
- }
-
- @Override
- public String toString() {
- return snapshot.toString();
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- @SuppressWarnings("unchecked") RestoreSource that = (RestoreSource) o;
- return snapshot.equals(that.snapshot) && index.equals(that.index);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(snapshot, index);
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java
index d54df7e080..0f3a8c6f21 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java
@@ -26,7 +26,7 @@ public interface RoutingChangesObserver {
/**
* Called when unassigned shard is initialized. Does not include initializing relocation target shards.
*/
- void shardInitialized(ShardRouting unassignedShard);
+ void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard);
/**
* Called when an initializing shard is started.
@@ -77,7 +77,7 @@ public interface RoutingChangesObserver {
class AbstractRoutingChangesObserver implements RoutingChangesObserver {
@Override
- public void shardInitialized(ShardRouting unassignedShard) {
+ public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
}
@@ -131,9 +131,9 @@ public interface RoutingChangesObserver {
}
@Override
- public void shardInitialized(ShardRouting unassignedShard) {
+ public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
for (RoutingChangesObserver routingChangesObserver : routingChangesObservers) {
- routingChangesObserver.shardInitialized(unassignedShard);
+ routingChangesObserver.shardInitialized(unassignedShard, initializedShard);
}
}
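
The widened callback above hands observers both sides of the transition rather than only the unassigned routing. A minimal sketch of the new shape, with stand-in types rather than the real classes:

    public final class ObserverSketch {
        static final class ShardRouting {
            final String desc;
            ShardRouting(String desc) { this.desc = desc; }
        }

        interface RoutingChangesObserver {
            void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard);
        }

        public static void main(String[] args) {
            RoutingChangesObserver observer = (unassigned, initialized) ->
                System.out.println(unassigned.desc + " -> " + initialized.desc);
            observer.shardInitialized(new ShardRouting("unassigned [P]"), new ShardRouting("initializing on node_1"));
        }
    }
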
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java
index 8403f45a55..4ba277d99c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java
@@ -19,18 +19,15 @@
package org.elasticsearch.cluster.routing;
-import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.shard.ShardId;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.Comparator;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
-import java.util.Map;
/**
* A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards
@@ -103,7 +100,8 @@ public class RoutingNode implements Iterable<ShardRouting> {
*/
void add(ShardRouting shard) {
if (shards.containsKey(shard.shardId())) {
- throw new IllegalStateException("Trying to add a shard " + shard.shardId() + " to a node [" + nodeId + "] where it already exists");
+ throw new IllegalStateException("Trying to add a shard " + shard.shardId() + " to a node [" + nodeId
+ + "] where it already exists. current [" + shards.get(shard.shardId()) + "]. new [" + shard + "]");
}
shards.put(shard.shardId(), shard);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
index 2ac9fcc8dd..bd5113029c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
@@ -166,7 +166,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
Recoveries.getOrAdd(recoveriesPerNode, routing.currentNodeId()).addIncoming(howMany);
- if (routing.isPeerRecovery()) {
+ if (routing.recoverySource().getType() == RecoverySource.Type.PEER) {
// add/remove corresponding outgoing recovery on node with primary shard
if (primary == null) {
throw new IllegalStateException("shard is peer recovering but primary is unassigned");
@@ -177,7 +177,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// primary is done relocating, move non-primary recoveries from old primary to new primary
int numRecoveringReplicas = 0;
for (ShardRouting assigned : assignedShards(routing.shardId())) {
- if (assigned.primary() == false && assigned.isPeerRecovery()) {
+ if (assigned.primary() == false && assigned.initializing() &&
+ assigned.recoverySource().getType() == RecoverySource.Type.PEER) {
numRecoveringReplicas++;
}
}
@@ -198,7 +199,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
@Nullable
private ShardRouting findAssignedPrimaryIfPeerRecovery(ShardRouting routing) {
ShardRouting primary = null;
- if (routing.isPeerRecovery()) {
+ if (routing.recoverySource() != null && routing.recoverySource().getType() == RecoverySource.Type.PEER) {
List<ShardRouting> shardRoutings = assignedShards.get(routing.shardId());
if (shardRoutings != null) {
for (ShardRouting shardRouting : shardRoutings) {
@@ -420,7 +421,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
addRecovery(initializedShard);
assignedShardsAdd(initializedShard);
- routingChangesObserver.shardInitialized(unassignedShard);
+ routingChangesObserver.shardInitialized(unassignedShard, initializedShard);
return initializedShard;
}
@@ -451,7 +452,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*
* @return the started shard
*/
- public ShardRouting startShard(ESLogger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
+ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
ensureMutable();
ShardRouting startedShard = started(initializingShard);
logger.trace("{} marked shard as started (routing: {})", initializingShard.shardId(), initializingShard);
@@ -483,7 +484,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* - If shard is a (primary or replica) relocation target, this also clears the relocation information on the source shard.
*
*/
- public void failShard(ESLogger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
+ public void failShard(Logger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
RoutingChangesObserver routingChangesObserver) {
ensureMutable();
assert failedShard.assignedToNode() : "only assigned shards can be failed";
@@ -627,7 +628,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
private ShardRouting promoteActiveReplicaShardToPrimary(ShardRouting replicaShard) {
assert replicaShard.active() : "non-active shard cannot be promoted to primary: " + replicaShard;
assert replicaShard.primary() == false : "primary shard cannot be promoted to primary: " + replicaShard;
- ShardRouting primaryShard = replicaShard.moveToPrimary();
+ ShardRouting primaryShard = replicaShard.moveActiveReplicaToPrimary();
updateAssigned(replicaShard, primaryShard);
return primaryShard;
}
@@ -701,10 +702,11 @@ public class RoutingNodes implements Iterable<RoutingNode> {
if (candidate.relocating()) {
cancelRelocation(candidate);
}
- ShardRouting reinitializedShard = candidate.reinitializeShard();
+ ShardRouting reinitializedShard = candidate.reinitializePrimaryShard();
updateAssigned(candidate, reinitializedShard);
inactivePrimaryCount++;
inactiveShardCount++;
+ addRecovery(reinitializedShard);
return reinitializedShard;
}
@@ -738,7 +740,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
assert shard.unassigned() == false : "only assigned shards can be moved to unassigned (" + shard + ")";
assert shard.primary() : "only primary can be demoted to replica (" + shard + ")";
remove(shard);
- ShardRouting unassigned = shard.moveToUnassigned(unassignedInfo).moveFromPrimary();
+ ShardRouting unassigned = shard.moveToUnassigned(unassignedInfo).moveUnassignedFromPrimary();
unassignedShards.add(unassigned);
return unassigned;
}
@@ -832,7 +834,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
currInfo.getNumFailedAllocations(), currInfo.getUnassignedTimeInNanos(),
currInfo.getUnassignedTimeInMillis(), currInfo.isDelayed(),
allocationStatus);
- ShardRouting updatedShard = shard.updateUnassignedInfo(newInfo);
+ ShardRouting updatedShard = shard.updateUnassigned(newInfo, shard.recoverySource());
changes.unassignedInfoUpdated(shard, newInfo);
shard = updatedShard;
}
@@ -891,14 +893,16 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
/**
- * updates the unassigned info on the current unassigned shard
+ * updates the unassigned info and recovery source on the current unassigned shard
*
* @param unassignedInfo the new unassigned info to use
+ * @param recoverySource the new recovery source to use
* @return the shard with unassigned info updated
*/
- public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo, RoutingChangesObserver changes) {
+ public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource,
+ RoutingChangesObserver changes) {
nodes.ensureMutable();
- ShardRouting updatedShardRouting = current.updateUnassignedInfo(unassignedInfo);
+ ShardRouting updatedShardRouting = current.updateUnassigned(unassignedInfo, recoverySource);
changes.unassignedInfoUpdated(current, unassignedInfo);
updateShardRouting(updatedShardRouting);
return updatedShardRouting;
@@ -1040,9 +1044,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
if (routing.initializing()) {
incoming++;
}
- if (routing.primary() && routing.isPeerRecovery() == false) {
+ if (routing.primary() && routing.isRelocationTarget() == false) {
for (ShardRouting assigned : routingNodes.assignedShards.get(routing.shardId())) {
- if (assigned.isPeerRecovery()) {
+ if (assigned.initializing() && assigned.recoverySource().getType() == RecoverySource.Type.PEER) {
outgoing++;
}
}
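
With ShardRouting#isPeerRecovery() removed (see the ShardRouting diff below), call sites like the ones above spell the check out against the shard state and recovery source. A stand-in sketch of the replacement predicate:

    public final class PeerRecoveryCheckSketch {
        enum Type { PEER, EMPTY_STORE }

        static boolean isPeerRecovery(boolean initializing, Type recoverySourceType) {
            // a shard performs peer recovery only while initializing from a PEER source
            return initializing && recoverySourceType == Type.PEER;
        }

        public static void main(String[] args) {
            System.out.println(isPeerRecovery(true, Type.PEER));  // true
            System.out.println(isPeerRecovery(false, Type.PEER)); // false: shard already active
        }
    }
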
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
index cfe48dd711..9dd2cc72da 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
@@ -19,11 +19,12 @@
package org.elasticsearch.cluster.routing;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -94,12 +95,7 @@ public class RoutingService extends AbstractLifecycleComponent {
@Override
public ClusterState execute(ClusterState currentState) {
rerouting.set(false);
- RoutingAllocation.Result routingResult = allocationService.reroute(currentState, reason);
- if (!routingResult.changed()) {
- // no state changed
- return currentState;
- }
- return ClusterState.builder(currentState).routingResult(routingResult).build();
+ return allocationService.reroute(currentState, reason);
}
@Override
@@ -113,16 +109,16 @@ public class RoutingService extends AbstractLifecycleComponent {
rerouting.set(false);
ClusterState state = clusterService.state();
if (logger.isTraceEnabled()) {
- logger.error("unexpected failure during [{}], current state:\n{}", e, source, state.prettyPrint());
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
} else {
- logger.error("unexpected failure during [{}], current state version [{}]", e, source, state.version());
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
}
}
});
} catch (Exception e) {
rerouting.set(false);
ClusterState state = clusterService.state();
- logger.warn("failed to reroute routing table, current state:\n{}", e, state.prettyPrint());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 6b7651b5bf..2d960ce045 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -27,6 +27,7 @@ import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -87,6 +88,11 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return indicesRouting.containsKey(index);
}
+ public boolean hasIndex(Index index) {
+ IndexRoutingTable indexRouting = index(index.getName());
+ return indexRouting != null && indexRouting.getIndex().equals(index);
+ }
+
public IndexRoutingTable index(String index) {
return indicesRouting.get(index);
}
@@ -540,16 +546,16 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return this;
}
- public Builder addAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+ public Builder addAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
- .initializeAsRestore(indexMetaData, restoreSource);
+ .initializeAsRestore(indexMetaData, recoverySource);
add(indexRoutingBuilder);
return this;
}
- public Builder addAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards) {
+ public Builder addAsNewRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
- .initializeAsNewRestore(indexMetaData, restoreSource, ignoreShards);
+ .initializeAsNewRestore(indexMetaData, recoverySource, ignoreShards);
add(indexRoutingBuilder);
return this;
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
index be7942e261..e441fd8111 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
@@ -19,8 +19,8 @@
package org.elasticsearch.cluster.routing;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -51,7 +51,7 @@ public final class ShardRouting implements Writeable, ToXContent {
private final String relocatingNodeId;
private final boolean primary;
private final ShardRoutingState state;
- private final RestoreSource restoreSource;
+ private final RecoverySource recoverySource;
private final UnassignedInfo unassignedInfo;
private final AllocationId allocationId;
private final transient List<ShardRouting> asList;
@@ -64,29 +64,31 @@ public final class ShardRouting implements Writeable, ToXContent {
* by either this class or tests. Visible for testing.
*/
ShardRouting(ShardId shardId, String currentNodeId,
- String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
+ String relocatingNodeId, boolean primary, ShardRoutingState state, RecoverySource recoverySource,
UnassignedInfo unassignedInfo, AllocationId allocationId, long expectedShardSize) {
this.shardId = shardId;
this.currentNodeId = currentNodeId;
this.relocatingNodeId = relocatingNodeId;
this.primary = primary;
this.state = state;
- this.asList = Collections.singletonList(this);
- this.restoreSource = restoreSource;
+ this.recoverySource = recoverySource;
this.unassignedInfo = unassignedInfo;
this.allocationId = allocationId;
this.expectedShardSize = expectedShardSize;
this.targetRelocatingShard = initializeTargetRelocatingShard();
+ this.asList = Collections.singletonList(this);
assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
+ assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was " + state;
+ assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary : "replica shards always recover from primary";
}
@Nullable
private ShardRouting initializeTargetRelocatingShard() {
if (state == ShardRoutingState.RELOCATING) {
- return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary,
- ShardRoutingState.INITIALIZING, unassignedInfo, AllocationId.newTargetRelocation(allocationId), expectedShardSize);
+ return new ShardRouting(shardId, relocatingNodeId, currentNodeId, primary, ShardRoutingState.INITIALIZING,
+ PeerRecoverySource.INSTANCE, unassignedInfo, AllocationId.newTargetRelocation(allocationId), expectedShardSize);
} else {
return null;
}
@@ -95,8 +97,8 @@ public final class ShardRouting implements Writeable, ToXContent {
/**
* Creates a new unassigned shard.
*/
- public static ShardRouting newUnassigned(ShardId shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
- return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
+ public static ShardRouting newUnassigned(ShardId shardId, boolean primary, RecoverySource recoverySource, UnassignedInfo unassignedInfo) {
+ return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, recoverySource, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
public Index index() {
@@ -200,13 +202,6 @@ public final class ShardRouting implements Writeable, ToXContent {
}
/**
- * Snapshot id and repository where this shard is being restored from
- */
- public RestoreSource restoreSource() {
- return restoreSource;
- }
-
- /**
* Additional metadata on why the shard is/was unassigned. The metadata is kept around
* until the shard moves to STARTED.
*/
@@ -244,32 +239,6 @@ public final class ShardRouting implements Writeable, ToXContent {
return shardId;
}
- public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) {
- if (active()) {
- return true;
- }
-
- // initializing replica might not have unassignedInfo
- assert unassignedInfo != null || (primary == false && state == ShardRoutingState.INITIALIZING);
- if (unassignedInfo != null && unassignedInfo.getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
- return false;
- }
-
- if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
- // when no shards with this id have ever been active for this index
- return false;
- }
-
- return true;
- }
-
- /**
- * returns true for initializing shards that recover their data from another shard copy
- */
- public boolean isPeerRecovery() {
- return state == ShardRoutingState.INITIALIZING && (primary() == false || relocatingNodeId != null);
- }
-
/**
* A shard iterator with just this shard in it.
*/
@@ -283,7 +252,11 @@ public final class ShardRouting implements Writeable, ToXContent {
relocatingNodeId = in.readOptionalString();
primary = in.readBoolean();
state = ShardRoutingState.fromValue(in.readByte());
- restoreSource = RestoreSource.readOptionalRestoreSource(in);
+ if (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) {
+ recoverySource = RecoverySource.readFrom(in);
+ } else {
+ recoverySource = null;
+ }
unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
allocationId = in.readOptionalWriteable(AllocationId::new);
final long shardSize;
@@ -312,7 +285,9 @@ public final class ShardRouting implements Writeable, ToXContent {
out.writeOptionalString(relocatingNodeId);
out.writeBoolean(primary);
out.writeByte(state.value());
- out.writeOptionalStreamable(restoreSource);
+ if (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) {
+ recoverySource.writeTo(out);
+ }
out.writeOptionalWriteable(unassignedInfo);
out.writeOptionalWriteable(allocationId);
if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) {
@@ -326,10 +301,10 @@ public final class ShardRouting implements Writeable, ToXContent {
writeToThin(out);
}
- public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) {
+ public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource) {
assert this.unassignedInfo != null : "can only update unassign info if they are already set";
assert this.unassignedInfo.isDelayed() || (unassignedInfo.isDelayed() == false) : "cannot transition from non-delayed to delayed";
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state,
+ return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, recoverySource,
unassignedInfo, allocationId, expectedShardSize);
}
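
Serialization above ties the recovery source to the shard lifecycle: it is written and read only while the shard is UNASSIGNED or INITIALIZING, so both ends of the stream must agree on the state before touching the field. A plain-JDK sketch of this state-conditional pattern:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class ConditionalFieldSketch {
        enum State { UNASSIGNED, INITIALIZING, STARTED, RELOCATING }

        static void write(DataOutputStream out, State state, String recoverySource) throws IOException {
            out.writeByte(state.ordinal());
            if (state == State.UNASSIGNED || state == State.INITIALIZING) {
                out.writeUTF(recoverySource); // field only present in these states
            }
        }

        static String read(DataInputStream in) throws IOException {
            State state = State.values()[in.readByte()];
            return (state == State.UNASSIGNED || state == State.INITIALIZING) ? in.readUTF() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            write(new DataOutputStream(bytes), State.INITIALIZING, "peer recovery");
            System.out.println(read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
        }
    }
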
@@ -338,7 +313,17 @@ public final class ShardRouting implements Writeable, ToXContent {
*/
public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) {
assert state != ShardRoutingState.UNASSIGNED : this;
- return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED,
+ final RecoverySource recoverySource;
+ if (active()) {
+ if (primary()) {
+ recoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
+ } else {
+ recoverySource = PeerRecoverySource.INSTANCE;
+ }
+ } else {
+ recoverySource = recoverySource();
+ }
+ return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, recoverySource,
unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
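
A condensed sketch of the recovery-source choice in moveToUnassigned above, with stand-in enums rather than the real classes: an active primary falls back to its existing on-disk store, an active replica will re-sync from the primary, and a shard that never became active keeps whatever source it already had.

    public final class MoveToUnassignedSketch {
        enum Source { EXISTING_STORE, PEER, ORIGINAL }

        static Source recoverySourceFor(boolean active, boolean primary, Source current) {
            if (active) {
                return primary ? Source.EXISTING_STORE : Source.PEER;
            }
            return current; // inactive shard: keep the original recovery source
        }

        public static void main(String[] args) {
            System.out.println(recoverySourceFor(true, true, Source.ORIGINAL));  // EXISTING_STORE
            System.out.println(recoverySourceFor(true, false, Source.ORIGINAL)); // PEER
        }
    }
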
@@ -356,7 +341,7 @@ public final class ShardRouting implements Writeable, ToXContent {
} else {
allocationId = AllocationId.newInitializing(existingAllocationId);
}
- return new ShardRouting(shardId, nodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING,
+ return new ShardRouting(shardId, nodeId, null, primary, ShardRoutingState.INITIALIZING, recoverySource,
unassignedInfo, allocationId, expectedShardSize);
}
@@ -367,7 +352,7 @@ public final class ShardRouting implements Writeable, ToXContent {
*/
public ShardRouting relocate(String relocatingNodeId, long expectedShardSize) {
assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, ShardRoutingState.RELOCATING,
+ return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, ShardRoutingState.RELOCATING, recoverySource,
null, AllocationId.newRelocation(allocationId), expectedShardSize);
}
@@ -379,7 +364,7 @@ public final class ShardRouting implements Writeable, ToXContent {
assert state == ShardRoutingState.RELOCATING : this;
assert assignedToNode() : this;
assert relocatingNodeId != null : this;
- return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED,
+ return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.STARTED, recoverySource,
null, AllocationId.cancelRelocation(allocationId), UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
@@ -393,17 +378,19 @@ public final class ShardRouting implements Writeable, ToXContent {
assert state == ShardRoutingState.INITIALIZING : this;
assert assignedToNode() : this;
assert relocatingNodeId != null : this;
- return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, state, unassignedInfo,
+ return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource, unassignedInfo,
AllocationId.finishRelocation(allocationId), expectedShardSize);
}
/**
- * Moves the shard from started to initializing
+ * Moves the primary shard from started to initializing
*/
- public ShardRouting reinitializeShard() {
- assert state == ShardRoutingState.STARTED;
- return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING,
- new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null), AllocationId.newInitializing(), UNAVAILABLE_EXPECTED_SHARD_SIZE);
+ public ShardRouting reinitializePrimaryShard() {
+ assert state == ShardRoutingState.STARTED : this;
+ assert primary : this;
+ return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.INITIALIZING,
+ StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null),
+ AllocationId.newInitializing(), UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
/**
@@ -418,39 +405,36 @@ public final class ShardRouting implements Writeable, ToXContent {
// relocation target
allocationId = AllocationId.finishRelocation(allocationId);
}
- return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED, null, allocationId,
+ return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.STARTED, null, null, allocationId,
UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
/**
- * Make the shard primary unless it's not Primary
+ * Make the active replica shard a primary
*
* @throws IllegalShardRoutingStateException if shard is already a primary
*/
- public ShardRouting moveToPrimary() {
+ public ShardRouting moveActiveReplicaToPrimary() {
+ assert active(): "expected an active shard " + this;
if (primary) {
throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
}
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, true, state, unassignedInfo, allocationId,
+ return new ShardRouting(shardId, currentNodeId, relocatingNodeId, true, state, recoverySource, unassignedInfo, allocationId,
expectedShardSize);
}
/**
- * Set the primary shard to non-primary
+ * Set the unassigned primary shard to non-primary
*
* @throws IllegalShardRoutingStateException if shard is already a replica
*/
- public ShardRouting moveFromPrimary() {
+ public ShardRouting moveUnassignedFromPrimary() {
+ assert state == ShardRoutingState.UNASSIGNED : "expected an unassigned shard " + this;
if (!primary) {
throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
}
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, false, state, unassignedInfo, allocationId,
- expectedShardSize);
- }
-
- /** returns true if this routing has the same shardId as another */
- public boolean isSameShard(ShardRouting other) {
- return getIndexName().equals(other.getIndexName()) && id() == other.id();
+ return new ShardRouting(shardId, currentNodeId, relocatingNodeId, false, state, PeerRecoverySource.INSTANCE, unassignedInfo,
+ allocationId, expectedShardSize);
}
/**
@@ -490,8 +474,8 @@ public final class ShardRouting implements Writeable, ToXContent {
assert b == false || this.currentNodeId().equals(other.relocatingNodeId) :
"ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]";
- assert b == false || isSameShard(other) :
- "ShardRouting is a relocation target but both routings are not of the same shard. This [" + this + "], other [" + other + "]";
+ assert b == false || this.shardId.equals(other.shardId) :
+ "ShardRouting is a relocation target but both routings are not of the same shard id. This [" + this + "], other [" + other + "]";
assert b == false || this.primary == other.primary :
"ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]";
@@ -517,7 +501,7 @@ public final class ShardRouting implements Writeable, ToXContent {
assert b == false || other.currentNodeId().equals(this.relocatingNodeId) :
"ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]";
- assert b == false || isSameShard(other) :
+ assert b == false || this.shardId.equals(other.shardId) :
"ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]";
assert b == false || this.primary == other.primary :
@@ -526,7 +510,7 @@ public final class ShardRouting implements Writeable, ToXContent {
return b;
}
- /** returns true if the current routing is identical to the other routing in all but meta fields, i.e., version and unassigned info */
+ /** returns true if the current routing is identical to the other routing in all but meta fields, i.e., unassigned info */
public boolean equalsIgnoringMetaData(ShardRouting other) {
if (primary != other.primary) {
return false;
@@ -546,7 +530,7 @@ public final class ShardRouting implements Writeable, ToXContent {
if (state != other.state) {
return false;
}
- if (restoreSource != null ? !restoreSource.equals(other.restoreSource) : other.restoreSource != null) {
+ if (recoverySource != null ? !recoverySource.equals(other.recoverySource) : other.recoverySource != null) {
return false;
}
return true;
@@ -582,7 +566,7 @@ public final class ShardRouting implements Writeable, ToXContent {
h = 31 * h + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
h = 31 * h + (primary ? 1 : 0);
h = 31 * h + (state != null ? state.hashCode() : 0);
- h = 31 * h + (restoreSource != null ? restoreSource.hashCode() : 0);
+ h = 31 * h + (recoverySource != null ? recoverySource.hashCode() : 0);
h = 31 * h + (allocationId != null ? allocationId.hashCode() : 0);
h = 31 * h + (unassignedInfo != null ? unassignedInfo.hashCode() : 0);
hashCode = h;
@@ -610,8 +594,8 @@ public final class ShardRouting implements Writeable, ToXContent {
} else {
sb.append("[R]");
}
- if (this.restoreSource != null) {
- sb.append(", restoring[" + restoreSource + "]");
+ if (recoverySource != null) {
+ sb.append(", recovery_source[").append(recoverySource).append("]");
}
sb.append(", s[").append(state).append("]");
if (allocationId != null) {
@@ -638,9 +622,8 @@ public final class ShardRouting implements Writeable, ToXContent {
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
builder.field("expected_shard_size_in_bytes", expectedShardSize);
}
- if (restoreSource() != null) {
- builder.field("restore_source");
- restoreSource().toXContent(builder, params);
+ if (recoverySource != null) {
+ builder.field("recovery_source", recoverySource);
}
if (allocationId != null) {
builder.field("allocation_id");
@@ -659,4 +642,14 @@ public final class ShardRouting implements Writeable, ToXContent {
public long getExpectedShardSize() {
return expectedShardSize;
}
+
+ /**
+ * Returns the recovery source for this shard. Replica shards always recover from the primary ({@link PeerRecoverySource}).
+ *
+ * @return the recovery source, or {@code null} if the shard is {@link #active()}
+ */
+ @Nullable
+ public RecoverySource recoverySource() {
+ return recoverySource;
+ }
}
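A sketch of consuming the new accessor; the helper is hypothetical, and EMPTY_STORE is the only RecoverySource.Type constant referenced elsewhere in this diff:

    // Hypothetical check: was this copy force-allocated as an empty store?
    static boolean isForcedEmptyStore(ShardRouting shard) {
        RecoverySource source = shard.recoverySource(); // null for active shards
        return source != null && source.getType() == RecoverySource.Type.EMPTY_STORE;
    }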
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
index 09d0c264f2..4670e1e473 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
@@ -108,7 +108,11 @@ public final class UnassignedInfo implements ToXContent, Writeable {
/**
* Unassigned as a result of a failed primary while the replica was initializing.
*/
- PRIMARY_FAILED
+ PRIMARY_FAILED,
+ /**
+ * Unassigned after forcing an empty primary
+ */
+ FORCED_EMPTY_PRIMARY
}
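A sketch of a hypothetical consumer of the new enum constant, built on the existing getReason() accessor:

    static boolean wasForcedEmpty(UnassignedInfo info) {
        // FORCED_EMPTY_PRIMARY means an empty primary was force-allocated,
        // so any previous shard copies are stale
        return info.getReason() == UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY;
    }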
/**
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index 0b9e43c5c8..323adf7804 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -20,7 +20,6 @@
package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.cluster.ClusterInfoService;
-import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterStateHealth;
@@ -32,7 +31,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.Result;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -62,7 +60,6 @@ public class AllocationService extends AbstractComponent {
private final GatewayAllocator gatewayAllocator;
private final ShardsAllocator shardsAllocator;
private final ClusterInfoService clusterInfoService;
- private final ClusterName clusterName;
@Inject
public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
@@ -72,81 +69,83 @@ public class AllocationService extends AbstractComponent {
this.gatewayAllocator = gatewayAllocator;
this.shardsAllocator = shardsAllocator;
this.clusterInfoService = clusterInfoService;
- clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
}
/**
* Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be
* provided as parameters, and the list must not contain duplicates.
* <p>
- * If the same instance of the routing table is returned, then no change has been made.</p>
+ * If the same instance of the {@link ClusterState} is returned, then no change has been made.</p>
*/
- public Result applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
- return applyStartedShards(clusterState, startedShards, true);
- }
-
- public Result applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards, boolean withReroute) {
+ public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
if (startedShards.isEmpty()) {
- return Result.unchanged(clusterState);
+ return clusterState;
}
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
- StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards,
- clusterInfoService.getClusterInfo(), currentNanoTime());
+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
+ clusterInfoService.getClusterInfo(), currentNanoTime(), false);
applyStartedShards(allocation, startedShards);
- gatewayAllocator.applyStartedShards(allocation);
- if (withReroute) {
- reroute(allocation);
- }
+ gatewayAllocator.applyStartedShards(allocation, startedShards);
+ reroute(allocation);
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
- return buildResultAndLogHealthChange(allocation, "shards started [" + startedShardsAsString + "] ...");
+ return buildResultAndLogHealthChange(clusterState, allocation, "shards started [" + startedShardsAsString + "] ...");
}
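A sketch of the new calling convention; `allocationService`, `state`, and `startedShards` are assumed to exist in the caller:

    // Identity comparison is the documented "no change" signal.
    ClusterState newState = allocationService.applyStartedShards(state, startedShards);
    if (newState != state) {
        // a new instance was returned, so the routing changed: publish newState
    }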
- protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) {
- return buildResultAndLogHealthChange(allocation, reason, new RoutingExplanations());
+ protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) {
+ return buildResultAndLogHealthChange(oldState, allocation, reason, new RoutingExplanations());
}
- protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) {
- RoutingTable oldRoutingTable = allocation.routingTable();
+ protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason,
+ RoutingExplanations explanations) {
+ RoutingTable oldRoutingTable = oldState.routingTable();
RoutingNodes newRoutingNodes = allocation.routingNodes();
final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build();
- MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges();
+ MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable);
assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata
+ final ClusterState newState = ClusterState.builder(oldState).routingTable(newRoutingTable).metaData(newMetaData).build();
logClusterHealthStateChange(
- new ClusterStateHealth(ClusterState.builder(clusterName).
- metaData(allocation.metaData()).routingTable(oldRoutingTable).build()),
- new ClusterStateHealth(ClusterState.builder(clusterName).
- metaData(newMetaData).routingTable(newRoutingTable).build()),
+ new ClusterStateHealth(oldState),
+ new ClusterStateHealth(newState),
reason
);
- return Result.changed(newRoutingTable, newMetaData, explanations);
+ return newState;
+ }
+
+ public ClusterState applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
+ return applyFailedShards(clusterState, Collections.singletonList(new FailedShard(failedShard, null, null)),
+ Collections.emptyList());
}
- public Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
- return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
+ public ClusterState applyFailedShards(ClusterState clusterState, List<FailedShard> failedShards) {
+ return applyFailedShards(clusterState, failedShards, Collections.emptyList());
}
/**
* Applies the failed shards. Note, only assigned ShardRouting instances that exist in the routing table should be
- * provided as parameter and no duplicates should be contained.
+ * provided as parameters. Also applies a list of allocation ids to remove from the in-sync set for shard copies for which there
+ * are no routing entries in the routing table.
*
* <p>
- * If the same instance of the routing table is returned, then no change has been made.</p>
+ * If the same instance of ClusterState is returned, then no change has been made.</p>
*/
- public Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
- if (failedShards.isEmpty()) {
- return Result.unchanged(clusterState);
+ public ClusterState applyFailedShards(final ClusterState clusterState, final List<FailedShard> failedShards,
+ final List<StaleShard> staleShards) {
+ if (staleShards.isEmpty() && failedShards.isEmpty()) {
+ return clusterState;
}
- RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
+ ClusterState tmpState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards);
+
+ RoutingNodes routingNodes = getMutableRoutingNodes(tmpState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
long currentNanoTime = currentNanoTime();
- FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards,
- clusterInfoService.getClusterInfo(), currentNanoTime);
+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, tmpState,
+ clusterInfoService.getClusterInfo(), currentNanoTime, false);
- for (FailedRerouteAllocation.FailedShard failedShardEntry : failedShards) {
- ShardRouting shardToFail = failedShardEntry.routingEntry;
+ for (FailedShard failedShardEntry : failedShards) {
+ ShardRouting shardToFail = failedShardEntry.getRoutingEntry();
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardToFail.shardId().getIndex());
allocation.addIgnoreShardForNode(shardToFail.shardId(), shardToFail.currentNodeId());
// failing a primary also fails initializing replica shards, re-resolve ShardRouting
@@ -157,26 +156,26 @@ public class AllocationService extends AbstractComponent {
shardToFail.shardId(), shardToFail, failedShard);
}
int failedAllocations = failedShard.unassignedInfo() != null ? failedShard.unassignedInfo().getNumFailedAllocations() : 0;
- UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShardEntry.message,
- failedShardEntry.failure, failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false,
+ UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShardEntry.getMessage(),
+ failedShardEntry.getFailure(), failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false,
AllocationStatus.NO_ATTEMPT);
routingNodes.failShard(logger, failedShard, unassignedInfo, indexMetaData, allocation.changes());
} else {
logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail);
}
}
- gatewayAllocator.applyFailedShards(allocation);
+ gatewayAllocator.applyFailedShards(allocation, failedShards);
reroute(allocation);
- String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.routingEntry.shardId().toString());
- return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
+ String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.getRoutingEntry().shardId().toString());
+ return buildResultAndLogHealthChange(clusterState, allocation, "shards failed [" + failedShardsAsString + "] ...");
}
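A sketch of failing a shard copy and removing a stale allocation id in one call; `routing`, `state`, and `allocationService` are assumed caller-side values, the message and id strings are illustrative, and the usual java.io/java.util imports are assumed:

    // FailedShard asserts that the routing entry is assigned to a node.
    List<FailedShard> failed = Collections.singletonList(
        new FailedShard(routing, "shard failure, reason [disk]", new IOException("disk")));
    // StaleShard entries must have no routing entry left in the routing table.
    List<StaleShard> stale = Collections.singletonList(
        new StaleShard(routing.shardId(), "stale-allocation-id"));
    ClusterState updated = allocationService.applyFailedShards(state, failed, stale);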
/**
* Unassigns any shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas
* if needed.
*/
- public RoutingAllocation.Result deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
+ public ClusterState deassociateDeadNodes(final ClusterState clusterState, boolean reroute, String reason) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
@@ -191,9 +190,9 @@ public class AllocationService extends AbstractComponent {
}
if (allocation.routingNodesChanged() == false) {
- return Result.unchanged(clusterState);
+ return clusterState;
}
- return buildResultAndLogHealthChange(allocation, reason);
+ return buildResultAndLogHealthChange(clusterState, allocation, reason);
}
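The same identity contract applies here; a sketch with assumed caller-side variables:

    // Detach routings from departed nodes, optionally following up with a reroute.
    ClusterState afterNodeLeft = allocationService.deassociateDeadNodes(state, true, "nodes left");
    boolean changed = afterNodeLeft != state; // same instance means nothing changed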
/**
@@ -209,9 +208,10 @@ public class AllocationService extends AbstractComponent {
final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(allocation.getCurrentNanoTime(),
metaData.getIndexSafe(shardRouting.index()).getSettings());
if (newComputedLeftDelayNanos == 0) {
- unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(),
+ unassignedIterator.updateUnassigned(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(),
unassignedInfo.getFailure(), unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(),
- unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus()), allocation.changes());
+ unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus()),
+ shardRouting.recoverySource(), allocation.changes());
}
}
}
@@ -234,7 +234,7 @@ public class AllocationService extends AbstractComponent {
.collect(Collectors.joining(", "));
}
- public Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
+ public CommandsResult reroute(final ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// we don't shuffle the unassigned shards here, to try and get as close as possible to
// a consistent result of the effect the commands have on the routing
@@ -251,25 +251,25 @@ public class AllocationService extends AbstractComponent {
// the assumption is that commands will move / act on shards (or fail through exceptions)
// so, there will always be shard "movements", so no need to check on reroute
reroute(allocation);
- return buildResultAndLogHealthChange(allocation, "reroute commands", explanations);
+ return new CommandsResult(explanations, buildResultAndLogHealthChange(clusterState, allocation, "reroute commands"));
}
/**
* Reroutes the routing table based on the live nodes.
* <p>
- * If the same instance of the routing table is returned, then no change has been made.
+ * If the same instance of ClusterState is returned, then no change has been made.
*/
- public Result reroute(ClusterState clusterState, String reason) {
+ public ClusterState reroute(ClusterState clusterState, String reason) {
return reroute(clusterState, reason, false);
}
/**
* Reroutes the routing table based on the live nodes.
* <p>
- * If the same instance of the routing table is returned, then no change has been made.
+ * If the same instance of ClusterState is returned, then no change has been made.
*/
- protected Result reroute(ClusterState clusterState, String reason, boolean debug) {
+ protected ClusterState reroute(final ClusterState clusterState, String reason, boolean debug) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
@@ -278,9 +278,9 @@ public class AllocationService extends AbstractComponent {
allocation.debugDecision(debug);
reroute(allocation);
if (allocation.routingNodesChanged() == false) {
- return Result.unchanged(clusterState);
+ return clusterState;
}
- return buildResultAndLogHealthChange(allocation, reason);
+ return buildResultAndLogHealthChange(clusterState, allocation, reason);
}
private void logClusterHealthStateChange(ClusterStateHealth previousStateHealth, ClusterStateHealth newStateHealth, String reason) {
@@ -358,4 +358,39 @@ public class AllocationService extends AbstractComponent {
protected long currentNanoTime() {
return System.nanoTime();
}
+
+ /**
+ * This class describes the result of applying a set of
+ * {@link org.elasticsearch.cluster.routing.allocation.command.AllocationCommand}
+ */
+ public static class CommandsResult {
+
+ private final RoutingExplanations explanations;
+
+ private final ClusterState clusterState;
+
+ /**
+ * Creates a new {@link CommandsResult}
+ * @param explanations Explanation for the reroute actions
+ * @param clusterState Resulting cluster state
+ */
+ private CommandsResult(RoutingExplanations explanations, ClusterState clusterState) {
+ this.clusterState = clusterState;
+ this.explanations = explanations;
+ }
+
+ /**
+ * Get the explanation of this result
+ */
+ public RoutingExplanations explanations() {
+ return explanations;
+ }
+
+ /**
+ * The resulting cluster state, after the commands were applied
+ */
+ public ClusterState getClusterState() {
+ return clusterState;
+ }
+ }
}
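A sketch of the replacement for RoutingAllocation.Result in the command path; `commands` and `state` are assumed caller-side values:

    // CommandsResult pairs the explanations with the resulting state.
    AllocationService.CommandsResult result =
        allocationService.reroute(state, commands, true /* explain */, false /* retryFailed */);
    RoutingExplanations explanations = result.explanations();
    ClusterState next = result.getClusterState();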
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java
index 103aa87dcd..390acda0fa 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java
@@ -62,10 +62,10 @@ public class DiskThresholdMonitor extends AbstractComponent implements ClusterIn
*/
private void warnAboutDiskIfNeeded(DiskUsage usage) {
// Check absolute disk values
- if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().bytes()) {
+ if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node",
diskThresholdSettings.getFreeBytesThresholdHigh(), usage);
- } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().bytes()) {
+ } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().getBytes()) {
logger.info("low disk watermark [{}] exceeded on {}, replicas will not be assigned to this node",
diskThresholdSettings.getFreeBytesThresholdLow(), usage);
}
@@ -100,7 +100,7 @@ public class DiskThresholdMonitor extends AbstractComponent implements ClusterIn
String node = entry.key;
DiskUsage usage = entry.value;
warnAboutDiskIfNeeded(usage);
- if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().bytes() ||
+ if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes() ||
usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) {
if ((System.nanoTime() - lastRunNS) > diskThresholdSettings.getRerouteInterval().nanos()) {
lastRunNS = System.nanoTime();
@@ -112,7 +112,7 @@ public class DiskThresholdMonitor extends AbstractComponent implements ClusterIn
node, diskThresholdSettings.getRerouteInterval());
}
nodeHasPassedWatermark.add(node);
- } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().bytes() ||
+ } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().getBytes() ||
usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdLow()) {
nodeHasPassedWatermark.add(node);
} else {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
deleted file mode 100644
index 154acb43bb..0000000000
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.cluster.routing.allocation;
-
-import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.ClusterInfo;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.routing.RoutingNodes;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
-import org.elasticsearch.index.shard.ShardId;
-
-import java.util.List;
-
-/**
- * This {@link RoutingAllocation} keeps a shard which routing
- * allocation has failed.
- */
-public class FailedRerouteAllocation extends RoutingAllocation {
-
- /**
- * A failed shard with the shard routing itself and an optional
- * details on why it failed.
- */
- public static class FailedShard {
- public final ShardRouting routingEntry;
- public final String message;
- public final Exception failure;
-
- public FailedShard(ShardRouting routingEntry, String message, Exception failure) {
- assert routingEntry.assignedToNode() : "only assigned shards can be failed " + routingEntry;
- this.routingEntry = routingEntry;
- this.message = message;
- this.failure = failure;
- }
-
- @Override
- public String toString() {
- return "failed shard, shard " + routingEntry + ", message [" + message + "], failure [" +
- ExceptionsHelper.detailedMessage(failure) + "]";
- }
- }
-
- private final List<FailedShard> failedShards;
-
- public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState,
- List<FailedShard> failedShards, ClusterInfo clusterInfo, long currentNanoTime) {
- super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
- this.failedShards = failedShards;
- }
-
- public List<FailedShard> failedShards() {
- return failedShards;
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedShard.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedShard.java
new file mode 100644
index 0000000000..9bf9fa86d1
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedShard.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+
+/**
+ * A class representing a failed shard.
+ */
+public class FailedShard {
+ private final ShardRouting routingEntry;
+ private final String message;
+ private final Exception failure;
+
+ public FailedShard(ShardRouting routingEntry, String message, Exception failure) {
+ assert routingEntry.assignedToNode() : "only assigned shards can be failed " + routingEntry;
+ this.routingEntry = routingEntry;
+ this.message = message;
+ this.failure = failure;
+ }
+
+ @Override
+ public String toString() {
+ return "failed shard, shard " + routingEntry + ", message [" + message + "], failure [" +
+ ExceptionsHelper.detailedMessage(failure) + "]";
+ }
+
+ /**
+ * The shard routing entry for the failed shard.
+ */
+ public ShardRouting getRoutingEntry() {
+ return routingEntry;
+ }
+
+ /**
+ * The failure message, if available, explaining why the shard failed.
+ */
+ @Nullable
+ public String getMessage() {
+ return message;
+ }
+
+ /**
+ * The exception, if present, causing the shard to fail.
+ */
+ @Nullable
+ public Exception getFailure() {
+ return failure;
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java
index 1f232cd8ac..f476972b21 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java
@@ -19,15 +19,20 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingChangesObserver;
+import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
+import java.util.Collections;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -39,7 +44,7 @@ import java.util.stream.Collectors;
* Observer that tracks changes made to RoutingNodes in order to update the primary terms and in-sync allocation ids in
* {@link IndexMetaData} once the allocation round has completed.
*
- * Primary terms are updated on primary initialization or primary promotion.
+ * Primary terms are updated on primary initialization or when an active primary fails.
*
* Allocation ids are added for shards that become active and removed for shards that stop being active.
*/
@@ -47,15 +52,16 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
private final Map<ShardId, Updates> shardChanges = new HashMap<>();
@Override
- public void shardInitialized(ShardRouting unassignedShard) {
- if (unassignedShard.primary()) {
- increasePrimaryTerm(unassignedShard);
- }
- }
+ public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
+ assert initializedShard.isRelocationTarget() == false : "shardInitialized is not called on relocation target: " + initializedShard;
+ if (initializedShard.primary()) {
+ increasePrimaryTerm(initializedShard.shardId());
- @Override
- public void replicaPromoted(ShardRouting replicaShard) {
- increasePrimaryTerm(replicaShard);
+ Updates updates = changes(initializedShard.shardId());
+ assert updates.initializedPrimary == null : "Primary cannot be initialized more than once in same allocation round: " +
+ "(previous: " + updates.initializedPrimary + ", next: " + initializedShard + ")";
+ updates.initializedPrimary = initializedShard;
+ }
}
@Override
@@ -65,8 +71,20 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
@Override
public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {
- if (failedShard.active()) {
+ if (failedShard.active() && unassignedInfo.getReason() != UnassignedInfo.Reason.NODE_LEFT) {
removeAllocationId(failedShard);
+
+ if (failedShard.primary()) {
+ Updates updates = changes(failedShard.shardId());
+ if (updates.firstFailedPrimary == null) {
+ // more than one primary can be failed (because of batching, primary can be failed, replica promoted and then failed...)
+ updates.firstFailedPrimary = failedShard;
+ }
+ }
+ }
+
+ if (failedShard.active() && failedShard.primary()) {
+ increasePrimaryTerm(failedShard.shardId());
}
}
@@ -82,48 +100,155 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
/**
* Updates the current {@link MetaData} based on the changes of this RoutingChangesObserver. Specifically
- * we update {@link IndexMetaData#getActiveAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on
+ * we update {@link IndexMetaData#getInSyncAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on
* the changes made during this allocation.
*
* @param oldMetaData {@link MetaData} object from before the routing nodes was changed.
+ * @param newRoutingTable {@link RoutingTable} object after routing changes were applied.
* @return adapted {@link MetaData}, potentially the original one if no change was needed.
*/
- public MetaData applyChanges(MetaData oldMetaData) {
+ public MetaData applyChanges(MetaData oldMetaData, RoutingTable newRoutingTable) {
Map<Index, List<Map.Entry<ShardId, Updates>>> changesGroupedByIndex =
shardChanges.entrySet().stream().collect(Collectors.groupingBy(e -> e.getKey().getIndex()));
MetaData.Builder metaDataBuilder = null;
for (Map.Entry<Index, List<Map.Entry<ShardId, Updates>>> indexChanges : changesGroupedByIndex.entrySet()) {
Index index = indexChanges.getKey();
- final IndexMetaData oldIndexMetaData = oldMetaData.index(index);
- if (oldIndexMetaData == null) {
- throw new IllegalStateException("no metadata found for index " + index);
- }
+ final IndexMetaData oldIndexMetaData = oldMetaData.getIndexSafe(index);
IndexMetaData.Builder indexMetaDataBuilder = null;
for (Map.Entry<ShardId, Updates> shardEntry : indexChanges.getValue()) {
ShardId shardId = shardEntry.getKey();
Updates updates = shardEntry.getValue();
+ indexMetaDataBuilder = updateInSyncAllocations(newRoutingTable, oldIndexMetaData, indexMetaDataBuilder, shardId, updates);
+ indexMetaDataBuilder = updatePrimaryTerm(oldIndexMetaData, indexMetaDataBuilder, shardId, updates);
+ }
- assert Sets.haveEmptyIntersection(updates.addedAllocationIds, updates.removedAllocationIds) :
- "Allocation ids cannot be both added and removed in the same allocation round, added ids: " +
- updates.addedAllocationIds + ", removed ids: " + updates.removedAllocationIds;
+ if (indexMetaDataBuilder != null) {
+ if (metaDataBuilder == null) {
+ metaDataBuilder = MetaData.builder(oldMetaData);
+ }
+ metaDataBuilder.put(indexMetaDataBuilder);
+ }
+ }
- Set<String> activeAllocationIds = new HashSet<>(oldIndexMetaData.activeAllocationIds(shardId.id()));
- activeAllocationIds.addAll(updates.addedAllocationIds);
- activeAllocationIds.removeAll(updates.removedAllocationIds);
- // only update active allocation ids if there is an active shard
- if (activeAllocationIds.isEmpty() == false) {
- if (indexMetaDataBuilder == null) {
- indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
- }
- indexMetaDataBuilder.putActiveAllocationIds(shardId.id(), activeAllocationIds);
+ if (metaDataBuilder != null) {
+ return metaDataBuilder.build();
+ } else {
+ return oldMetaData;
+ }
+ }
+
+ /**
+ * Updates in-sync allocations with routing changes that were made to the routing table.
+ */
+ private IndexMetaData.Builder updateInSyncAllocations(RoutingTable newRoutingTable, IndexMetaData oldIndexMetaData,
+ IndexMetaData.Builder indexMetaDataBuilder, ShardId shardId, Updates updates) {
+ assert Sets.haveEmptyIntersection(updates.addedAllocationIds, updates.removedAllocationIds) :
+ "allocation ids cannot be both added and removed in the same allocation round, added ids: " +
+ updates.addedAllocationIds + ", removed ids: " + updates.removedAllocationIds;
+
+ Set<String> oldInSyncAllocationIds = oldIndexMetaData.inSyncAllocationIds(shardId.id());
+
+ // check if we have been force-initializing an empty primary or a stale primary
+ if (updates.initializedPrimary != null && oldInSyncAllocationIds.isEmpty() == false &&
+ oldInSyncAllocationIds.contains(updates.initializedPrimary.allocationId().getId()) == false) {
+ // we're not reusing an existing in-sync allocation id to initialize a primary, which means that we're either force-allocating
+ // an empty or a stale primary (see AllocateEmptyPrimaryAllocationCommand or AllocateStalePrimaryAllocationCommand).
+ RecoverySource.Type recoverySourceType = updates.initializedPrimary.recoverySource().getType();
+ boolean emptyPrimary = recoverySourceType == RecoverySource.Type.EMPTY_STORE;
+ assert updates.addedAllocationIds.isEmpty() : (emptyPrimary ? "empty" : "stale") +
+ " primary is not force-initialized in same allocation round where shards are started";
+
+ if (indexMetaDataBuilder == null) {
+ indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
+ }
+ if (emptyPrimary) {
+ // forcing an empty primary resets the in-sync allocations to the empty set (ShardRouting.allocatedPostIndexCreate)
+ indexMetaDataBuilder.putInSyncAllocationIds(shardId.id(), Collections.emptySet());
+ } else {
+ // forcing a stale primary resets the in-sync allocations to the singleton set with the stale id
+ indexMetaDataBuilder.putInSyncAllocationIds(shardId.id(),
+ Collections.singleton(updates.initializedPrimary.allocationId().getId()));
+ }
+ } else {
+ // standard path for updating in-sync ids
+ Set<String> inSyncAllocationIds = new HashSet<>(oldInSyncAllocationIds);
+ inSyncAllocationIds.addAll(updates.addedAllocationIds);
+ inSyncAllocationIds.removeAll(updates.removedAllocationIds);
+
+ // Prevent the set of inSyncAllocationIds from growing unboundedly. This can happen for example if we don't write to a primary
+ // but repeatedly shut down nodes that have active replicas.
+ // We use number_of_replicas + 1 (= possible active shard copies) to bound the inSyncAllocationIds set
+ int maxActiveShards = oldIndexMetaData.getNumberOfReplicas() + 1; // +1 for the primary
+ if (inSyncAllocationIds.size() > maxActiveShards) {
+ // trim entries that have no corresponding shard routing in the cluster state (i.e. trim unavailable copies)
+ List<ShardRouting> assignedShards = newRoutingTable.shardRoutingTable(shardId).assignedShards();
+ assert assignedShards.size() <= maxActiveShards :
+ "cannot have more assigned shards " + assignedShards + " than maximum possible active shards " + maxActiveShards;
+ Set<String> assignedAllocations = assignedShards.stream().map(s -> s.allocationId().getId()).collect(Collectors.toSet());
+ inSyncAllocationIds = inSyncAllocationIds.stream()
+ .sorted(Comparator.comparing(assignedAllocations::contains).reversed()) // values with routing entries first
+ .limit(maxActiveShards)
+ .collect(Collectors.toSet());
+ }
+
+ // only update in-sync allocation ids if there is at least one entry remaining. Assume for example that only a primary
+ // was ever active and has now failed. If we were to remove its allocation id from the in-sync set, this would
+ // create an empty primary on the next allocation (see ShardRouting#allocatedPostIndexCreate)
+ if (inSyncAllocationIds.isEmpty() && oldInSyncAllocationIds.isEmpty() == false) {
+ assert updates.firstFailedPrimary != null :
+ "in-sync set became empty but active primary wasn't failed: " + oldInSyncAllocationIds;
+ if (updates.firstFailedPrimary != null) {
+ // add back allocation id of failed primary
+ inSyncAllocationIds.add(updates.firstFailedPrimary.allocationId().getId());
}
+ }
+
+ assert inSyncAllocationIds.isEmpty() == false || oldInSyncAllocationIds.isEmpty() :
+ "in-sync allocations cannot become empty after they have been non-empty: " + oldInSyncAllocationIds;
- if (updates.increaseTerm) {
+ // be extra safe here and only update in-sync set if it is non-empty
+ if (inSyncAllocationIds.isEmpty() == false) {
+ if (indexMetaDataBuilder == null) {
+ indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
+ }
+ indexMetaDataBuilder.putInSyncAllocationIds(shardId.id(), inSyncAllocationIds);
+ }
+ }
+ return indexMetaDataBuilder;
+ }
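The trimming step above in isolation, with hypothetical ids and the usual java.util and java.util.stream imports assumed:

    // Ids that still have routing entries sort first, then the set is capped.
    Set<String> assigned = new HashSet<>(Arrays.asList("a", "b"));
    Set<String> inSync = new HashSet<>(Arrays.asList("a", "b", "c", "d"));
    int maxActiveShards = 3; // number_of_replicas (2) + 1 for the primary
    Set<String> trimmed = inSync.stream()
        .sorted(Comparator.comparing(assigned::contains).reversed()) // true (has routing) first
        .limit(maxActiveShards)
        .collect(Collectors.toSet());
    // trimmed keeps "a" and "b" plus exactly one of the unavailable ids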
+
+ /**
+ * Removes allocation ids from the in-sync set for shard copies for which there are no routing entries in the routing table.
+ * This method is called in AllocationService before any changes to the routing table are made.
+ */
+ public static ClusterState removeStaleIdsWithoutRoutings(ClusterState clusterState, List<StaleShard> staleShards) {
+ MetaData oldMetaData = clusterState.metaData();
+ RoutingTable oldRoutingTable = clusterState.routingTable();
+ MetaData.Builder metaDataBuilder = null;
+ // group staleShards entries by index
+ for (Map.Entry<Index, List<StaleShard>> indexEntry : staleShards.stream().collect(
+ Collectors.groupingBy(fs -> fs.getShardId().getIndex())).entrySet()) {
+ final IndexMetaData oldIndexMetaData = oldMetaData.getIndexSafe(indexEntry.getKey());
+ IndexMetaData.Builder indexMetaDataBuilder = null;
+ // group staleShards entries by shard id
+ for (Map.Entry<ShardId, List<StaleShard>> shardEntry : indexEntry.getValue().stream().collect(
+ Collectors.groupingBy(staleShard -> staleShard.getShardId())).entrySet()) {
+ int shardNumber = shardEntry.getKey().getId();
+ Set<String> oldInSyncAllocations = oldIndexMetaData.inSyncAllocationIds(shardNumber);
+ Set<String> idsToRemove = shardEntry.getValue().stream().map(e -> e.getAllocationId()).collect(Collectors.toSet());
+ assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) :
+ "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable.prettyPrint();
+ Set<String> remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove);
+ assert remainingInSyncAllocations.isEmpty() == false : "Set of in-sync ids cannot become empty for shard " +
+ shardEntry.getKey() + " (before: " + oldInSyncAllocations + ", ids to remove: " + idsToRemove + ")";
+ // be extra safe here: if the in-sync set were to become empty, this would create an empty primary on the next allocation
+ // (see ShardRouting#allocatedPostIndexCreate)
+ if (remainingInSyncAllocations.isEmpty() == false) {
if (indexMetaDataBuilder == null) {
indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
}
- indexMetaDataBuilder.primaryTerm(shardId.id(), oldIndexMetaData.primaryTerm(shardId.id()) + 1);
+ indexMetaDataBuilder.putInSyncAllocationIds(shardNumber, remainingInSyncAllocations);
}
}
@@ -136,10 +261,24 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
}
if (metaDataBuilder != null) {
- return metaDataBuilder.build();
+ return ClusterState.builder(clusterState).metaData(metaDataBuilder).build();
} else {
- return oldMetaData;
+ return clusterState;
+ }
+ }
+
+ /**
+ * Increases the primary term if {@link #increasePrimaryTerm} was called for this shard id.
+ */
+ private IndexMetaData.Builder updatePrimaryTerm(IndexMetaData oldIndexMetaData, IndexMetaData.Builder indexMetaDataBuilder,
+ ShardId shardId, Updates updates) {
+ if (updates.increaseTerm) {
+ if (indexMetaDataBuilder == null) {
+ indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
+ }
+ indexMetaDataBuilder.primaryTerm(shardId.id(), oldIndexMetaData.primaryTerm(shardId.id()) + 1);
}
+ return indexMetaDataBuilder;
}
/**
@@ -166,13 +305,15 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
/**
* Increase primary term for this shard id
*/
- private void increasePrimaryTerm(ShardRouting shardRouting) {
- changes(shardRouting.shardId()).increaseTerm = true;
+ private void increasePrimaryTerm(ShardId shardId) {
+ changes(shardId).increaseTerm = true;
}
private static class Updates {
private boolean increaseTerm; // whether primary term should be increased
private Set<String> addedAllocationIds = new HashSet<>(); // allocation ids that should be added to the in-sync set
private Set<String> removedAllocationIds = new HashSet<>(); // allocation ids that should be removed from the in-sync set
+ private ShardRouting initializedPrimary = null; // primary that was initialized from unassigned
+ private ShardRouting firstFailedPrimary = null; // first active primary that was failed
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
index 0794c6d828..8429493b0e 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
@@ -46,83 +46,6 @@ import static java.util.Collections.unmodifiableSet;
*/
public class RoutingAllocation {
- /**
- * this class is used to describe results of a {@link RoutingAllocation}
- */
- public static class Result {
-
- private final boolean changed;
-
- private final RoutingTable routingTable;
-
- private final MetaData metaData;
-
- private final RoutingExplanations explanations;
-
- /**
- * Creates a new {@link RoutingAllocation.Result} where no change to the routing table was made.
- * @param clusterState the unchanged {@link ClusterState}
- */
- public static Result unchanged(ClusterState clusterState) {
- return new Result(false, clusterState.routingTable(), clusterState.metaData(), new RoutingExplanations());
- }
-
- /**
- * Creates a new {@link RoutingAllocation.Result} where changes were made to the routing table.
- * @param routingTable the {@link RoutingTable} this Result references
- * @param metaData the {@link MetaData} this Result references
- * @param explanations Explanation for the reroute actions
- */
- public static Result changed(RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
- return new Result(true, routingTable, metaData, explanations);
- }
-
- /**
- * Creates a new {@link RoutingAllocation.Result}
- * @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
- * @param routingTable the {@link RoutingTable} this Result references
- * @param metaData the {@link MetaData} this Result references
- * @param explanations Explanation for the reroute actions
- */
- private Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
- this.changed = changed;
- this.routingTable = routingTable;
- this.metaData = metaData;
- this.explanations = explanations;
- }
-
- /** determine whether the actual {@link RoutingTable} has been changed
- * @return <code>true</code> if the {@link RoutingTable} has been changed by allocation. Otherwise <code>false</code>
- */
- public boolean changed() {
- return this.changed;
- }
-
- /**
- * Get the {@link MetaData} referenced by this result
- * @return referenced {@link MetaData}
- */
- public MetaData metaData() {
- return metaData;
- }
-
- /**
- * Get the {@link RoutingTable} referenced by this result
- * @return referenced {@link RoutingTable}
- */
- public RoutingTable routingTable() {
- return routingTable;
- }
-
- /**
- * Get the explanation of this result
- * @return explanation
- */
- public RoutingExplanations explanations() {
- return explanations;
- }
- }
-
private final AllocationDeciders deciders;
private final RoutingNodes routingNodes;
@@ -307,8 +230,8 @@ public class RoutingAllocation {
/**
* Returns updated {@link MetaData} based on the changes that were made to the routing nodes
*/
- public MetaData updateMetaDataWithRoutingChanges() {
- return indexMetaDataUpdater.applyChanges(metaData);
+ public MetaData updateMetaDataWithRoutingChanges(RoutingTable newRoutingTable) {
+ return indexMetaDataUpdater.applyChanges(metaData, newRoutingTable);
}
/**
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java
index c971687234..42e80689ee 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java
@@ -38,8 +38,9 @@ public class RoutingNodesChangedObserver implements RoutingChangesObserver {
}
@Override
- public void shardInitialized(ShardRouting unassignedShard) {
+ public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
assert unassignedShard.unassigned() : "expected unassigned shard " + unassignedShard;
+ assert initializedShard.initializing() : "expected initializing shard " + initializedShard;
setChanged();
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StaleShard.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StaleShard.java
new file mode 100644
index 0000000000..9454f62db9
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StaleShard.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * A class that represents a stale shard copy.
+ */
+public class StaleShard {
+ private final ShardId shardId;
+ private final String allocationId;
+
+ public StaleShard(ShardId shardId, String allocationId) {
+ this.shardId = shardId;
+ this.allocationId = allocationId;
+ }
+
+ @Override
+ public String toString() {
+ return "stale shard, shard " + shardId + ", alloc. id [" + allocationId + "]";
+ }
+
+ /**
+ * The shard id of the stale shard.
+ */
+ public ShardId getShardId() {
+ return shardId;
+ }
+
+ /**
+ * The allocation id of the stale shard.
+ */
+ public String getAllocationId() {
+ return allocationId;
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
deleted file mode 100644
index e63ce2b19e..0000000000
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.cluster.routing.allocation;
-
-import org.elasticsearch.cluster.ClusterInfo;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.routing.RoutingNodes;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
-
-import java.util.List;
-
-/**
- * This {@link RoutingAllocation} holds a list of started shards within a
- * cluster
- */
-public class StartedRerouteAllocation extends RoutingAllocation {
-
- private final List<ShardRouting> startedShards;
-
- public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState,
- List<ShardRouting> startedShards, ClusterInfo clusterInfo, long currentNanoTime) {
- super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
- this.startedShards = startedShards;
- }
-
- /**
- * Get started shards
- * @return list of started shards
- */
- public List<ShardRouting> startedShards() {
- return startedShards;
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java
new file mode 100644
index 0000000000..172360849f
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
+import org.elasticsearch.common.Nullable;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Represents the allocation decision by an allocator for an unassigned shard.
+ */
+public class UnassignedShardDecision {
+ /** a constant representing a shard decision where no decision was taken */
+ public static final UnassignedShardDecision DECISION_NOT_TAKEN =
+ new UnassignedShardDecision(null, null, null, null, null, null);
+
+ @Nullable
+ private final Decision finalDecision;
+ @Nullable
+ private final AllocationStatus allocationStatus;
+ @Nullable
+ private final String finalExplanation;
+ @Nullable
+ private final String assignedNodeId;
+ @Nullable
+ private final String allocationId;
+ @Nullable
+ private final Map<String, Decision> nodeDecisions;
+
+ private UnassignedShardDecision(Decision finalDecision,
+ AllocationStatus allocationStatus,
+ String finalExplanation,
+ String assignedNodeId,
+ String allocationId,
+ Map<String, Decision> nodeDecisions) {
+ assert finalExplanation != null || finalDecision == null :
+ "if a decision was taken, there must be an explanation for it";
+ assert assignedNodeId != null || finalDecision == null || finalDecision.type() != Type.YES :
+ "a yes decision must have a node to assign the shard to";
+ assert allocationStatus != null || finalDecision == null || finalDecision.type() == Type.YES :
+ "only a yes decision should not have an allocation status";
+ assert allocationId == null || assignedNodeId != null :
+ "allocation id can only be null if the assigned node is null";
+ this.finalDecision = finalDecision;
+ this.allocationStatus = allocationStatus;
+ this.finalExplanation = finalExplanation;
+ this.assignedNodeId = assignedNodeId;
+ this.allocationId = allocationId;
+ this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
+ }
+
+ /**
+ * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision.
+ */
+ public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, String explanation) {
+ return noDecision(allocationStatus, explanation, null);
+ }
+
+ /**
+ * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision,
+ * as well as the individual node-level decisions that comprised the final NO decision.
+ */
+ public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus,
+ String explanation,
+ @Nullable Map<String, Decision> nodeDecisions) {
+ Objects.requireNonNull(explanation, "explanation must not be null");
+ Objects.requireNonNull(allocationStatus, "allocationStatus must not be null");
+ return new UnassignedShardDecision(Decision.NO, allocationStatus, explanation, null, null, nodeDecisions);
+ }
+
+ /**
+ * Creates a THROTTLE decision with the given explanation and individual node-level decisions that
+ * comprised the final THROTTLE decision.
+ */
+ public static UnassignedShardDecision throttleDecision(String explanation,
+ Map<String, Decision> nodeDecisions) {
+ Objects.requireNonNull(explanation, "explanation must not be null");
+ return new UnassignedShardDecision(Decision.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null,
+ nodeDecisions);
+ }
+
+ /**
+ * Creates a YES decision with the given explanation and individual node-level decisions that
+ * comprised the final YES decision, along with the node id to which the shard is assigned and
+ * the allocation id for the shard, if available.
+ */
+ public static UnassignedShardDecision yesDecision(String explanation,
+ String assignedNodeId,
+ @Nullable String allocationId,
+ Map<String, Decision> nodeDecisions) {
+ Objects.requireNonNull(explanation, "explanation must not be null");
+ Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null");
+ return new UnassignedShardDecision(Decision.YES, null, explanation, assignedNodeId, allocationId, nodeDecisions);
+ }
+
+ /**
+ * Returns {@code true} if a decision was taken by the allocator, {@code false} otherwise.
+ * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
+ */
+ public boolean isDecisionTaken() {
+ return finalDecision != null;
+ }
+
+ /**
+ * Returns the final decision made by the allocator on whether to assign the unassigned shard.
+ * This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}.
+ */
+ @Nullable
+ public Decision getFinalDecision() {
+ return finalDecision;
+ }
+
+ /**
+ * Returns the final decision made by the allocator on whether to assign the unassigned shard.
+ * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
+ * throw an {@code IllegalArgumentException}.
+ */
+ public Decision getFinalDecisionSafe() {
+ if (isDecisionTaken() == false) {
+ throw new IllegalArgumentException("decision must have been taken in order to return the final decision");
+ }
+ return finalDecision;
+ }
+
+ /**
+ * Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if
+ * no decision was taken or if the decision was {@link Decision.Type#YES}.
+ */
+ @Nullable
+ public AllocationStatus getAllocationStatus() {
+ return allocationStatus;
+ }
+
+ /**
+ * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
+ */
+ @Nullable
+ public String getFinalExplanation() {
+ return finalExplanation;
+ }
+
+ /**
+ * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
+ * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
+ * throw an {@code IllegalArgumentException}.
+ */
+ public String getFinalExplanationSafe() {
+ if (isDecisionTaken() == false) {
+ throw new IllegalArgumentException("decision must have been taken in order to return the final explanation");
+ }
+ return finalExplanation;
+ }
+
+ /**
+ * Returns the node id that the allocator will assign the shard to, or {@code null} if
+ * {@link #getFinalDecision()} returns a value other than {@link Decision.Type#YES}.
+ */
+ @Nullable
+ public String getAssignedNodeId() {
+ return assignedNodeId;
+ }
+
+ /**
+ * Gets the allocation id for the existing shard copy that the allocator is assigning the shard to.
+ * This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value
+ * and the node on which the shard is assigned already has a shard copy with an in-sync allocation id
+ * that we can re-use.
+ */
+ @Nullable
+ public String getAllocationId() {
+ return allocationId;
+ }
+
+ /**
+ * Gets the individual node-level decisions that went into making the final decision as represented by
+ * {@link #getFinalDecision()}. The map that is returned has the node id as the key and a {@link Decision}
+ * as the decision for the given node.
+ */
+ @Nullable
+ public Map<String, Decision> getNodeDecisions() {
+ return nodeDecisions;
+ }
+}
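To illustrate the contract of the class above, a minimal caller-side sketch (not part of the patch; it uses only the factory methods and accessors shown in this file):

    // Hypothetical usage sketch of UnassignedShardDecision.
    UnassignedShardDecision decision =
        UnassignedShardDecision.throttleDecision("all eligible nodes are throttled", Collections.emptyMap());
    assert decision.isDecisionTaken();                                         // a final decision was recorded
    assert decision.getFinalDecisionSafe() == Decision.THROTTLE;               // safe accessor: throws if no decision taken
    assert decision.getAllocationStatus() == AllocationStatus.DECIDERS_THROTTLED;
    assert decision.getAssignedNodeId() == null;                               // only YES decisions carry an assigned node id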
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index c86e256bd6..50c2d28cf0 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -37,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -209,7 +209,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* A {@link Balancer}
*/
public static class Balancer {
- private final ESLogger logger;
+ private final Logger logger;
private final Map<String, ModelNode> nodes = new HashMap<>();
private final RoutingAllocation allocation;
private final RoutingNodes routingNodes;
@@ -219,7 +219,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
private final MetaData metaData;
private final float avgShardsPerNode;
- public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
+ public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
this.logger = logger;
this.allocation = allocation;
this.weight = weight;
@@ -792,10 +792,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
long shardSize = allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
- if (logger.isTraceEnabled()) {
- logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
+ logger.debug("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
minNode.getNodeId());
- }
/* now allocate on the cluster */
minNode.addShard(routingNodes.relocateShard(candidate, minNode.getNodeId(), shardSize, allocation.changes()).v1());
return true;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
index f9b5a398ba..90a591b119 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -187,7 +188,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
* @param shardRouting the shard routing that is to be matched in unassigned shards
*/
protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode, ShardRouting shardRouting) {
- initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null);
+ initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null, null);
}
/**
@@ -198,16 +199,19 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
* @param routingNode the node to initialize it to
* @param shardRouting the shard routing that is to be matched in unassigned shards
* @param unassignedInfo unassigned info to override
+ * @param recoverySource recovery source to override
*/
protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode,
- ShardRouting shardRouting, @Nullable UnassignedInfo unassignedInfo) {
+ ShardRouting shardRouting, @Nullable UnassignedInfo unassignedInfo,
+ @Nullable RecoverySource recoverySource) {
for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
ShardRouting unassigned = it.next();
if (!unassigned.equalsIgnoringMetaData(shardRouting)) {
continue;
}
- if (unassignedInfo != null) {
- unassigned = it.updateUnassignedInfo(unassignedInfo, allocation.changes());
+ if (unassignedInfo != null || recoverySource != null) {
+ unassigned = it.updateUnassigned(unassignedInfo != null ? unassignedInfo : unassigned.unassignedInfo(),
+ recoverySource != null ? recoverySource : unassigned.recoverySource(), allocation.changes());
}
it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes());
return;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
index d07db78e94..82d6f436d2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
@@ -20,6 +20,8 @@
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -115,21 +117,20 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned");
}
- if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && acceptDataLoss == false) {
+ if (shardRouting.recoverySource().getType() != RecoverySource.Type.EMPTY_STORE && acceptDataLoss == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}
UnassignedInfo unassignedInfoToUpdate = null;
- if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
- // we need to move the unassigned info back to treat it as if it was index creation
- unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
+ if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY) {
+ unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY,
"force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false,
shardRouting.unassignedInfo().getLastAllocationStatus());
}
- initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);
+ initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate, StoreRecoverySource.EMPTY_STORE_INSTANCE);
return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}
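For reference, a hedged sketch of constructing this command programmatically; the four-argument constructor shape (index, shardId, node, acceptDataLoss) is an assumption, as it is not shown in this hunk. Per the check above, accept_data_loss must be true unless the shard's recovery source is already EMPTY_STORE.

    // Sketch; constructor signature assumed, not shown in this diff.
    AllocationCommand cmd =
        new AllocateEmptyPrimaryAllocationCommand("my-index", 0, "node-1", true /* accept_data_loss */);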
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
index 5fad78d036..acdd5cae30 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
@@ -19,8 +19,8 @@
package org.elasticsearch.cluster.routing.allocation.command;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -120,8 +120,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
"allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}
- final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
- if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
+ if (shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
return explainOrThrowRejectedCommand(explain, allocation,
"trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active");
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
index 841b374e87..986613e5a4 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
@@ -26,6 +26,9 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
import java.util.Set;
/**
@@ -34,16 +37,11 @@ import java.util.Set;
*/
public class AllocationDeciders extends AllocationDecider {
- private final AllocationDecider[] allocations;
+ private final Collection<AllocationDecider> allocations;
- public AllocationDeciders(Settings settings, AllocationDecider[] allocations) {
+ public AllocationDeciders(Settings settings, Collection<AllocationDecider> allocations) {
super(settings);
- this.allocations = allocations;
- }
-
- @Inject
- public AllocationDeciders(Settings settings, Set<AllocationDecider> allocations) {
- this(settings, allocations.toArray(new AllocationDecider[allocations.size()]));
+ this.allocations = Collections.unmodifiableCollection(allocations);
}
@Override
@@ -76,7 +74,7 @@ public class AllocationDeciders extends AllocationDecider {
// short circuit if a NO is returned.
if (decision == Decision.NO) {
if (logger.isTraceEnabled()) {
- logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
+ logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName());
}
// short circuit only if debugging is not enabled
if (!allocation.debugDecision()) {
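Since the constructor now accepts any Collection, callers can pass an ordered list directly; a minimal sketch (assuming Settings.EMPTY and the single-argument decider constructors shown elsewhere in this diff):

    // Deciders are consulted in iteration order and wrapped in an unmodifiable collection.
    Settings settings = Settings.EMPTY;
    AllocationDeciders deciders = new AllocationDeciders(settings, Arrays.asList(
        new MaxRetryAllocationDecider(settings),
        new SameShardAllocationDecider(settings)));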
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
index 32eaa8ddec..f78dc784d9 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
@@ -19,21 +19,20 @@
package org.elasticsearch.cluster.routing.allocation.decider;
+import java.util.HashMap;
+import java.util.Map;
+
import com.carrotsearch.hppc.ObjectIntHashMap;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
-import java.util.HashMap;
-import java.util.Map;
-
/**
* This {@link AllocationDecider} controls shard allocation based on
* <tt>awareness</tt> key-value pairs defined in the node configuration.
@@ -50,7 +49,7 @@ import java.util.Map;
* To enable allocation awareness in this example nodes should contain a value
* for the <tt>rack_id</tt> key like:
* <pre>
- * node.rack_id:1
+ * node.attr.rack_id:1
* </pre>
* <p>
* Awareness can also be used to prevent over-allocation in the case of node or
@@ -104,7 +103,6 @@ public class AwarenessAllocationDecider extends AllocationDecider {
this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
- @Inject
public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings);
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
index 740c99016d..c343d4254c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
@@ -19,16 +19,15 @@
package org.elasticsearch.cluster.routing.allocation.decider;
+import java.util.Locale;
+
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
-import java.util.Locale;
-
/**
* This {@link AllocationDecider} controls re-balancing operations based on the
* cluster wide active shard state. This decider cannot be configured in
@@ -85,7 +84,6 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
private volatile ClusterRebalanceType type;
- @Inject
public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
try {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
index 2c46f7bd54..dd3ece10dd 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -48,7 +47,6 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
Property.Dynamic, Property.NodeScope);
private volatile int clusterConcurrentRebalance;
- @Inject
public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings);
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index 2aacac0cdd..53d3dd2903 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
@@ -33,7 +34,6 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -69,7 +69,6 @@ public class DiskThresholdDecider extends AllocationDecider {
private final DiskThresholdSettings diskThresholdSettings;
- @Inject
public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings);
@@ -79,11 +78,10 @@ public class DiskThresholdDecider extends AllocationDecider {
* Returns the size of all shards that are currently being relocated to
* the node, but may not be finished transferring yet.
*
- * If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
- * of all shards
+ * If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards
*/
static long sizeOfRelocatingShards(RoutingNode node, RoutingAllocation allocation,
- boolean subtractShardsMovingAway, String dataPath) {
+ boolean subtractShardsMovingAway, String dataPath) {
ClusterInfo clusterInfo = allocation.clusterInfo();
long totalSize = 0;
for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) {
@@ -112,7 +110,9 @@ public class DiskThresholdDecider extends AllocationDecider {
final double usedDiskThresholdLow = 100.0 - diskThresholdSettings.getFreeDiskThresholdLow();
final double usedDiskThresholdHigh = 100.0 - diskThresholdSettings.getFreeDiskThresholdHigh();
- DiskUsage usage = getDiskUsage(node, allocation, usages);
+ // subtractLeavingShards is passed as false here, because shards that are leaving the node still use disk space,
+ // and therefore we should be extra careful and take their size into account
+ DiskUsage usage = getDiskUsage(node, allocation, usages, false);
// First, check that the node is currently over the low watermark
double freeDiskPercentage = usage.getFreeDiskAsPercentage();
// Cache the used disk percentage for displaying disk percentages consistent with documentation
@@ -122,14 +122,14 @@ public class DiskThresholdDecider extends AllocationDecider {
logger.trace("node [{}] has {}% used disk", node.nodeId(), usedDiskPercentage);
}
- // a flag for whether the primary shard has been previously allocated
- IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
- boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);
+ // flag that determines whether the low threshold checks below can be skipped. We use this for a primary shard that is freshly
+ // allocated and empty.
+ boolean skipLowThresholdChecks = shardRouting.primary() &&
+ shardRouting.active() == false && shardRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE;
// checks for exact byte comparisons
- if (freeBytes < diskThresholdSettings.getFreeBytesThresholdLow().bytes()) {
- // If the shard is a replica or has a primary that has already been allocated before, check the low threshold
- if (!shardRouting.primary() || (shardRouting.primary() && primaryHasBeenAllocated)) {
+ if (freeBytes < diskThresholdSettings.getFreeBytesThresholdLow().getBytes()) {
+ if (skipLowThresholdChecks == false) {
if (logger.isDebugEnabled()) {
logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation",
diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
@@ -137,7 +137,7 @@ public class DiskThresholdDecider extends AllocationDecider {
return allocation.decision(Decision.NO, NAME,
"the node is above the low watermark and has less than required [%s] free, free: [%s]",
diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytes));
- } else if (freeBytes > diskThresholdSettings.getFreeBytesThresholdHigh().bytes()) {
+ } else if (freeBytes > diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
// Allow the shard to be allocated because it is a primary that
// has never been allocated, if it's under the high watermark
if (logger.isDebugEnabled()) {
@@ -164,8 +164,8 @@ public class DiskThresholdDecider extends AllocationDecider {
// checks for percentage comparisons
if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdLow()) {
- // If the shard is a replica or has a primary that has already been allocated before, check the low threshold
- if (!shardRouting.primary() || (shardRouting.primary() && primaryHasBeenAllocated)) {
+ // If the shard is a replica or is a non-empty primary, check the low threshold
+ if (skipLowThresholdChecks == false) {
if (logger.isDebugEnabled()) {
logger.debug("more than the allowed {} used disk threshold ({} used) on node [{}], preventing allocation",
Strings.format1Decimals(usedDiskThresholdLow, "%"),
@@ -205,7 +205,7 @@ public class DiskThresholdDecider extends AllocationDecider {
final long shardSize = getExpectedShardSize(shardRouting, allocation, 0);
double freeSpaceAfterShard = freeDiskPercentageAfterShardAssigned(usage, shardSize);
long freeBytesAfterShard = freeBytes - shardSize;
- if (freeBytesAfterShard < diskThresholdSettings.getFreeBytesThresholdHigh().bytes()) {
+ if (freeBytesAfterShard < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
logger.warn("after allocating, node [{}] would have less than the required " +
"{} free bytes threshold ({} bytes free), preventing allocation",
node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesAfterShard);
@@ -244,7 +244,9 @@ public class DiskThresholdDecider extends AllocationDecider {
return decision;
}
- final DiskUsage usage = getDiskUsage(node, allocation, usages);
+ // subtractLeavingShards is passed as true here, because this check only applies to shards that remain on the node:
+ // we will *eventually* have enough disk since shards are moving away. No new shards will be incoming, since
+ // canAllocate passes false for this check.
+ final DiskUsage usage = getDiskUsage(node, allocation, usages, true);
final String dataPath = clusterInfo.getDataPath(shardRouting);
// If this node is already above the high threshold, the shard cannot remain (get it off!)
final double freeDiskPercentage = usage.getFreeDiskAsPercentage();
@@ -256,7 +258,7 @@ public class DiskThresholdDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME,
"this shard is not allocated on the most utilized disk and can remain");
}
- if (freeBytes < diskThresholdSettings.getFreeBytesThresholdHigh().bytes()) {
+ if (freeBytes < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
if (logger.isDebugEnabled()) {
logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain",
diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
@@ -281,7 +283,8 @@ public class DiskThresholdDecider extends AllocationDecider {
"there is enough disk on this node for the shard to remain, free: [%s]", new ByteSizeValue(freeBytes));
}
- private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
+ private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation,
+ ImmutableOpenMap<String, DiskUsage> usages, boolean subtractLeavingShards) {
DiskUsage usage = usages.get(node.nodeId());
if (usage == null) {
// If there is no usage, and we have other nodes in the cluster,
@@ -294,7 +297,7 @@ public class DiskThresholdDecider extends AllocationDecider {
}
if (diskThresholdSettings.includeRelocations()) {
- long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, true, usage.getPath());
+ long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, subtractLeavingShards, usage.getPath());
DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(),
usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
if (logger.isTraceEnabled()) {
@@ -380,12 +383,13 @@ public class DiskThresholdDecider extends AllocationDecider {
public static long getExpectedShardSize(ShardRouting shard, RoutingAllocation allocation, long defaultValue) {
final IndexMetaData metaData = allocation.metaData().getIndexSafe(shard.index());
final ClusterInfo info = allocation.clusterInfo();
- if (metaData.getMergeSourceIndex() != null && shard.allocatedPostIndexCreate(metaData) == false) {
+ if (metaData.getMergeSourceIndex() != null && shard.active() == false &&
+ shard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
// in the shrink index case we sum up the source index shards since we basically make a copy of the shard in
// the worst case
long targetShardSize = 0;
final Index mergeSourceIndex = metaData.getMergeSourceIndex();
- final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(metaData.getMergeSourceIndex());
+ final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(mergeSourceIndex);
final Set<ShardId> shardIds = IndexMetaData.selectShrinkShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards());
for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) {
if (shardIds.contains(shardRoutingTable.shardId())) {
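The bytes() to getBytes() renames in this file are mechanical; a one-line sketch of the accessor in isolation (the value is illustrative):

    // ByteSizeValue now exposes the raw byte count via getBytes().
    long freeBytesThresholdHigh = new ByteSizeValue(50, ByteSizeUnit.GB).getBytes();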
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
index 7dc8eff3eb..64bf594214 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
@@ -19,18 +19,18 @@
package org.elasticsearch.cluster.routing.allocation.decider;
+import java.util.Locale;
+
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
-import java.util.Locale;
-
/**
* This allocation decider allows shard allocations / rebalancing via the cluster wide settings
* {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting
@@ -79,7 +79,6 @@ public class EnableAllocationDecider extends AllocationDecider {
private volatile Rebalance enableRebalance;
private volatile Allocation enableAllocation;
- @Inject
public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings);
@@ -115,7 +114,8 @@ public class EnableAllocationDecider extends AllocationDecider {
case NONE:
return allocation.decision(Decision.NO, NAME, "no allocations are allowed");
case NEW_PRIMARIES:
- if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
+ if (shardRouting.primary() && shardRouting.active() == false &&
+ shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
} else {
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden");
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
index 63bd588114..a42db129da 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
@@ -21,10 +21,10 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -75,7 +75,6 @@ public class FilterAllocationDecider extends AllocationDecider {
private volatile DiscoveryNodeFilters clusterIncludeFilters;
private volatile DiscoveryNodeFilters clusterExcludeFilters;
- @Inject
public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings));
@@ -94,8 +93,8 @@ public class FilterAllocationDecider extends AllocationDecider {
// this is a setting that can only be set within the system!
IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
DiscoveryNodeFilters initialRecoveryFilters = indexMd.getInitialRecoveryFilters();
- if (shardRouting.allocatedPostIndexCreate(indexMd) == false &&
- initialRecoveryFilters != null &&
+ if (initialRecoveryFilters != null &&
+ RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) &&
initialRecoveryFilters.match(node.node()) == false) {
return allocation.decision(Decision.NO, NAME, "node does not match index initial recovery filters [%s]",
indexMd.getInitialRecoveryFilters());
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
index 74db79d36b..395d347232 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
@@ -24,7 +24,6 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -49,7 +48,6 @@ public class MaxRetryAllocationDecider extends AllocationDecider {
*
* @param settings {@link Settings} used by this {@link AllocationDecider}
*/
- @Inject
public MaxRetryAllocationDecider(Settings settings) {
super(settings);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
index 9d9f78c3c1..52a5184032 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
@@ -19,12 +19,12 @@
package org.elasticsearch.cluster.routing.allocation.decider;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
/**
@@ -38,7 +38,6 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
public static final String NAME = "node_version";
- @Inject
public NodeVersionAllocationDecider(Settings settings) {
super(settings);
}
@@ -47,12 +46,12 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (shardRouting.primary()) {
if (shardRouting.currentNodeId() == null) {
- if (shardRouting.restoreSource() != null) {
+ if (shardRouting.recoverySource() != null && shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) {
// restoring from a snapshot - check that the node can handle the version
- return isVersionCompatible(shardRouting.restoreSource(), node, allocation);
+ return isVersionCompatible((SnapshotRecoverySource)shardRouting.recoverySource(), node, allocation);
} else {
- // fresh primary, we can allocate wherever
- return allocation.decision(Decision.YES, NAME, "the primary shard is new and can be allocated anywhere");
+ // existing or fresh primary on the node
+ return allocation.decision(Decision.YES, NAME, "the primary shard is new or already existed on the node");
}
} else {
// relocating primary, only migrate to newer host
@@ -85,14 +84,14 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
}
}
- private Decision isVersionCompatible(RestoreSource restoreSource, final RoutingNode target, RoutingAllocation allocation) {
- if (target.node().getVersion().onOrAfter(restoreSource.version())) {
+ private Decision isVersionCompatible(SnapshotRecoverySource recoverySource, final RoutingNode target, RoutingAllocation allocation) {
+ if (target.node().getVersion().onOrAfter(recoverySource.version())) {
/* we can allocate if we can restore from a snapshot that is older or on the same version */
return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than snapshot version [%s]",
- target.node().getVersion(), restoreSource.version());
+ target.node().getVersion(), recoverySource.version());
} else {
return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the snapshot version [%s]",
- target.node().getVersion(), restoreSource.version());
+ target.node().getVersion(), recoverySource.version());
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
index b6c675597c..d8042f18a2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
/**
@@ -31,7 +30,6 @@ public class RebalanceOnlyWhenActiveAllocationDecider extends AllocationDecider
public static final String NAME = "rebalance_only_when_active";
- @Inject
public RebalanceOnlyWhenActiveAllocationDecider(Settings settings) {
super(settings);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
index c23f093092..4cceb1cc16 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
@@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
/**
@@ -32,7 +31,6 @@ public class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecide
private static final String NAME = "replica_after_primary_active";
- @Inject
public ReplicaAfterPrimaryActiveAllocationDecider(Settings settings) {
super(settings);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
index fca8a34936..3f2921dfcd 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
@@ -23,14 +23,14 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
/**
* An allocation decider that prevents multiple instances of the same shard to
* be allocated on the same <tt>node</tt>.
*
- * The {@value #SAME_HOST_SETTING} setting allows to perform a check to prevent
+ * The {@link #CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting allows performing a check to prevent
* allocation of multiple instances of the same shard on a single <tt>host</tt>,
* based on host name and host address. Defaults to `false`, meaning that no
* check is performed by default.
@@ -45,15 +45,15 @@ public class SameShardAllocationDecider extends AllocationDecider {
public static final String NAME = "same_shard";
- public static final String SAME_HOST_SETTING = "cluster.routing.allocation.same_shard.host";
+ public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING =
+ Setting.boolSetting("cluster.routing.allocation.same_shard.host", false, Setting.Property.NodeScope);
private final boolean sameHost;
- @Inject
public SameShardAllocationDecider(Settings settings) {
super(settings);
- this.sameHost = settings.getAsBoolean(SAME_HOST_SETTING, false);
+ this.sameHost = CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.get(settings);
}
@Override
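A short sketch of reading the new typed setting (settings values are illustrative; not part of the patch):

    // The raw string key still works in settings sources, but reads now go through the Setting
    // object, which supplies the default (false) and carries the NodeScope property.
    Settings settings = Settings.builder()
        .put("cluster.routing.allocation.same_shard.host", true)
        .build();
    boolean sameHost = SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.get(settings); // true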
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
index e1741c1af7..aa4fe3d593 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
@@ -24,7 +24,6 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -72,8 +71,6 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1,
Property.Dynamic, Property.NodeScope);
-
- @Inject
public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings);
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
index e25a4e690d..bd9bf35a68 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
@@ -23,7 +23,6 @@ import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -63,7 +62,6 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
- @Inject
public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings);
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index b880b04f3d..df2e1d1223 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -19,11 +19,11 @@
package org.elasticsearch.cluster.routing.allocation.decider;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -80,8 +80,6 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
private volatile int concurrentIncomingRecoveries;
private volatile int concurrentOutgoingRecoveries;
-
- @Inject
public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings);
@@ -114,7 +112,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (shardRouting.primary() && shardRouting.unassigned()) {
- assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery() == false;
+ assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() != RecoverySource.Type.PEER;
// primary is unassigned, which means we are going to recover from store, snapshot or local shards
// count *just the primaries* currently doing recovery on the node and check against primariesInitialRecoveries
@@ -135,7 +133,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
}
} else {
// Peer recovery
- assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery();
+ assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == RecoverySource.Type.PEER;
// Allocating a shard to this node will increase the incoming recoveries
int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
index 21e2defd9b..e981313f40 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
@@ -19,6 +19,9 @@
package org.elasticsearch.cluster.service;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
@@ -43,7 +46,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
@@ -554,9 +556,16 @@ public class ClusterService extends AbstractLifecycleComponent {
} catch (Exception e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
if (logger.isTraceEnabled()) {
- logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime,
- previousClusterState.version(), tasksSummary, previousClusterState.nodes().prettyPrint(),
- previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
+ executionTime,
+ previousClusterState.version(),
+ tasksSummary,
+ previousClusterState.nodes().prettyPrint(),
+ previousClusterState.routingTable().prettyPrint(),
+ previousClusterState.getRoutingNodes().prettyPrint()),
+ e);
}
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder()
@@ -587,7 +596,9 @@ public class ClusterService extends AbstractLifecycleComponent {
executionResult.handle(
() -> proccessedListeners.add(updateTask),
ex -> {
- logger.debug("cluster state update task {} failed", ex, updateTask.toString(executor));
+ logger.debug(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("cluster state update task {} failed", updateTask.toString(executor)), ex);
updateTask.listener.onFailure(updateTask.source, ex);
}
);
@@ -670,7 +681,11 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
clusterStatePublisher.accept(clusterChangedEvent, ackListener);
} catch (Discovery.FailedToCommitClusterStateException t) {
- logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, tasksSummary, newClusterState.version());
+ final long version = newClusterState.version();
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version),
+ t);
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
return;
}
@@ -713,7 +728,10 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null);
} catch (Exception e) {
- logger.debug("error while processing ack for master node [{}]", e, newClusterState.nodes().getLocalNode());
+ final DiscoveryNode localNode = newClusterState.nodes().getLocalNode();
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage("error while processing ack for master node [{}]", localNode),
+ e);
}
}
@@ -724,7 +742,11 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
executor.clusterStatePublished(clusterChangedEvent);
} catch (Exception e) {
- logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, tasksSummary);
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "exception thrown while notifying executor of new cluster state publication [{}]",
+ tasksSummary),
+ e);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
@@ -733,8 +755,18 @@ public class ClusterService extends AbstractLifecycleComponent {
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
} catch (Exception e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
- logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", e, executionTime,
- newClusterState.version(), newClusterState.stateUUID(), tasksSummary, newClusterState.prettyPrint());
+ final long version = newClusterState.version();
+ final String stateUUID = newClusterState.stateUUID();
+ final String prettyPrint = newClusterState.prettyPrint();
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
+ executionTime,
+ version,
+ stateUUID,
+ tasksSummary,
+ prettyPrint),
+ e);
// TODO: do we want to call updateTask.onFailure here?
}
@@ -743,7 +775,7 @@ public class ClusterService extends AbstractLifecycleComponent {
// this one is overridden in tests so we can control time
protected long currentTimeInNanos() {return System.nanoTime();}
- private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
+ private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Logger logger) {
if (listener instanceof AckedClusterStateTaskListener) {
return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
} else {
@@ -753,9 +785,9 @@ public class ClusterService extends AbstractLifecycleComponent {
private static class SafeClusterStateTaskListener implements ClusterStateTaskListener {
private final ClusterStateTaskListener listener;
- private final ESLogger logger;
+ private final Logger logger;
- public SafeClusterStateTaskListener(ClusterStateTaskListener listener, ESLogger logger) {
+ public SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) {
this.listener = listener;
this.logger = logger;
}
@@ -766,7 +798,9 @@ public class ClusterService extends AbstractLifecycleComponent {
listener.onFailure(source, e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.error("exception thrown by listener notifying of failure from [{}]", inner, source);
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "exception thrown by listener notifying of failure from [{}]", source), inner);
}
}
@@ -775,7 +809,9 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
listener.onNoLongerMaster(source);
} catch (Exception e) {
- logger.error("exception thrown by listener while notifying no longer master from [{}]", e, source);
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "exception thrown by listener while notifying no longer master from [{}]", source), e);
}
}
@@ -785,21 +821,22 @@ public class ClusterService extends AbstractLifecycleComponent {
listener.clusterStateProcessed(source, oldState, newState);
} catch (Exception e) {
logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
"exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
- "{}\nnew cluster state:\n{}",
- e,
+ "{}\nnew cluster state:\n{}",
source,
oldState.prettyPrint(),
- newState.prettyPrint());
+ newState.prettyPrint()),
+ e);
}
}
}
private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener {
private final AckedClusterStateTaskListener listener;
- private final ESLogger logger;
+ private final Logger logger;
- public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, ESLogger logger) {
+ public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) {
super(listener, logger);
this.listener = listener;
this.logger = logger;
@@ -996,7 +1033,7 @@ public class ClusterService extends AbstractLifecycleComponent {
private static class AckCountDownListener implements Discovery.AckListener {
- private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
+ private static final Logger logger = Loggers.getLogger(AckCountDownListener.class);
private final AckedClusterStateTaskListener ackedTaskListener;
private final CountDown countDown;
@@ -1040,7 +1077,10 @@ public class ClusterService extends AbstractLifecycleComponent {
logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
} else {
this.lastFailure = e;
- logger.debug("ack received from node [{}], cluster_state update (version: {})", e, node, clusterStateVersion);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
+ e);
}
if (countDown.countDown()) {
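
Note on the pattern used throughout the hunks above: Log4j 2's Logger has no overload taking a Throwable followed by format arguments, so the formatted message is captured in a Supplier of ParameterizedMessage and the exception is passed as the dedicated Throwable parameter. A minimal sketch of the pattern (class name and message are illustrative, not from this commit):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LoggingPatternSketch {
        private static final Logger logger = LogManager.getLogger(LoggingPatternSketch.class);

        void onFailure(String source, Exception e) {
            // the cast disambiguates the lambda; message construction is deferred
            // until the DEBUG level is actually enabled
            logger.debug(
                (Supplier<?>) () -> new ParameterizedMessage("failed to process [{}]", source),
                e);
        }
    }
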
diff --git a/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java b/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java
index c6f23f72f9..dc6a88447b 100644
--- a/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java
+++ b/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java
@@ -16,13 +16,14 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.common;
+package org.elasticsearch.common;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
+
/**
* Annotation to suppress logging usage checks errors inside a whole class or a method.
*/
diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
index ad9fe257cf..725535ecad 100644
--- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
+++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
@@ -46,7 +46,7 @@ public class FsBlobStore extends AbstractComponent implements BlobStore {
super(settings);
this.path = path;
Files.createDirectories(path);
- this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
+ this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes();
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/core/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java
index 813a12571e..2386f79c07 100644
--- a/core/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java
+++ b/core/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java
@@ -55,7 +55,7 @@ public class URLBlobStore extends AbstractComponent implements BlobStore {
public URLBlobStore(Settings settings, URL path) {
super(settings);
this.path = path;
- this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.uri.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
+ this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.uri.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes();
}
/**
diff --git a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java
index e6c67dbe9b..68bf52e9e0 100644
--- a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java
+++ b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java
@@ -19,7 +19,7 @@
package org.elasticsearch.common.breaker;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.indices.breaker.BreakerSettings;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
@@ -36,7 +36,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
private final double overheadConstant;
private final AtomicLong used;
private final AtomicLong trippedCount;
- private final ESLogger logger;
+ private final Logger logger;
private final HierarchyCircuitBreakerService parent;
private final String name;
@@ -48,7 +48,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
* @param parent parent circuit breaker service to delegate tripped breakers to
* @param name the name of the breaker
*/
- public ChildMemoryCircuitBreaker(BreakerSettings settings, ESLogger logger,
+ public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger,
HierarchyCircuitBreakerService parent, String name) {
this(settings, null, logger, parent, name);
}
@@ -64,7 +64,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
* @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
*/
public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker,
- ESLogger logger, HierarchyCircuitBreakerService parent, String name) {
+ Logger logger, HierarchyCircuitBreakerService parent, String name) {
this.name = name;
this.settings = settings;
this.memoryBytesLimit = settings.getLimit();
diff --git a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java
index 7dbdd7d6a6..23e76a9fd3 100644
--- a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java
+++ b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.common.breaker;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;
import java.util.concurrent.atomic.AtomicLong;
@@ -33,7 +33,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
private final double overheadConstant;
private final AtomicLong used;
private final AtomicLong trippedCount;
- private final ESLogger logger;
+ private final Logger logger;
/**
@@ -43,7 +43,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
* @param limit circuit breaker limit
* @param overheadConstant constant multiplier for byte estimations
*/
- public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, ESLogger logger) {
+ public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, Logger logger) {
this(limit, overheadConstant, null, logger);
}
@@ -56,8 +56,8 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
* @param overheadConstant constant multiplier for byte estimations
* @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
*/
- public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) {
- this.memoryBytesLimit = limit.bytes();
+ public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, Logger logger) {
+ this.memoryBytesLimit = limit.getBytes();
this.overheadConstant = overheadConstant;
if (oldBreaker == null) {
this.used = new AtomicLong(0);
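
The circuit-breaker constructors now accept a plain Log4j Logger and read the limit via getBytes(). A hedged construction sketch (limit and overhead values are illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    class BreakerSketch {
        static MemoryCircuitBreaker newBreaker() {
            // any Log4j Logger satisfies the new signature; no ESLogger wrapper is needed
            return new MemoryCircuitBreaker(
                    new ByteSizeValue(16, ByteSizeUnit.MB), 1.03,
                    LogManager.getLogger(BreakerSketch.class));
        }
    }
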
diff --git a/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java
index fa49a80123..8cb51f2b06 100644
--- a/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java
+++ b/core/src/main/java/org/elasticsearch/common/component/AbstractComponent.java
@@ -19,19 +19,17 @@
package org.elasticsearch.common.component;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
-/**
- *
- */
public abstract class AbstractComponent {
- protected final ESLogger logger;
+ protected final Logger logger;
protected final DeprecationLogger deprecationLogger;
protected final Settings settings;
@@ -42,7 +40,7 @@ public abstract class AbstractComponent {
}
public AbstractComponent(Settings settings, Class customClass) {
- this.logger = Loggers.getLogger(customClass, settings);
+ this.logger = LogManager.getLogger(customClass);
this.deprecationLogger = new DeprecationLogger(logger);
this.settings = settings;
}
@@ -71,4 +69,5 @@ public abstract class AbstractComponent {
deprecationLogger.deprecated("Setting [{}] has been removed, use [{}] instead", settingName, alternativeName);
}
}
+
}
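
Subclasses of AbstractComponent are unaffected at the source level: the inherited logger field simply becomes a plain Log4j Logger named after the custom class instead of a settings-prefixed ESLogger. A minimal sketch (component name is illustrative):

    import org.elasticsearch.common.component.AbstractComponent;
    import org.elasticsearch.common.settings.Settings;

    class ExampleComponent extends AbstractComponent {
        ExampleComponent(Settings settings) {
            super(settings, ExampleComponent.class);
            // `logger` is now an org.apache.logging.log4j.Logger
            logger.info("initialized with [{}] settings", settings.getAsMap().size());
        }
    }
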
diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
index 504fc41313..15e2fb4fab 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
@@ -19,8 +19,15 @@
package org.elasticsearch.common.geo;
+import org.apache.lucene.document.LatLonDocValuesField;
+import org.apache.lucene.document.LatLonPoint;
+import org.apache.lucene.geo.GeoEncodingUtils;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitUtil;
+import org.apache.lucene.util.BytesRef;
+
+import java.util.Arrays;
import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
@@ -88,6 +95,24 @@ public final class GeoPoint {
return this;
}
+ // todo this is a crutch because LatLonPoint doesn't have a helper for returning .stringValue()
+ // todo remove with next release of lucene
+ public GeoPoint resetFromIndexableField(IndexableField field) {
+ if (field instanceof LatLonPoint) {
+ BytesRef br = field.binaryValue();
+ byte[] bytes = Arrays.copyOfRange(br.bytes, br.offset, br.length);
+ return this.reset(
+ GeoEncodingUtils.decodeLatitude(bytes, 0),
+ GeoEncodingUtils.decodeLongitude(bytes, Integer.BYTES));
+ } else if (field instanceof LatLonDocValuesField) {
+ long encoded = (long)(field.numericValue());
+ return this.reset(
+ GeoEncodingUtils.decodeLatitude((int)(encoded >>> 32)),
+ GeoEncodingUtils.decodeLongitude((int)encoded));
+ }
+ return resetFromIndexHash(Long.parseLong(field.stringValue()));
+ }
+
public GeoPoint resetFromGeoHash(String geohash) {
final long hash = mortonEncode(geohash);
return this.reset(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));
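
The new resetFromIndexableField helper lets callers decode a point from whichever Lucene field representation was indexed. A usage sketch (field name and coordinates are illustrative):

    import org.apache.lucene.document.LatLonPoint;
    import org.apache.lucene.index.IndexableField;
    import org.elasticsearch.common.geo.GeoPoint;

    class GeoPointSketch {
        static GeoPoint decode() {
            // handles LatLonPoint, LatLonDocValuesField, and legacy string-encoded fields
            IndexableField field = new LatLonPoint("location", 52.52, 13.40);
            return new GeoPoint().resetFromIndexableField(field);
        }
    }
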
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
index 84d02eb4ec..cb2f8bb4e7 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
@@ -19,20 +19,15 @@
package org.elasticsearch.common.geo.builders;
-import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
-import org.locationtech.spatial4j.exception.InvalidShapeException;
-import org.locationtech.spatial4j.shape.Shape;
-import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;
-
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.DistanceUnit.Distance;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -40,6 +35,10 @@ import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
+import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
+import org.locationtech.spatial4j.exception.InvalidShapeException;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import java.io.IOException;
import java.util.ArrayList;
@@ -53,7 +52,7 @@ import java.util.Locale;
*/
public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable {
- protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
+ protected static final Logger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
private static final boolean DEBUG;
static {
diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java
index 30911def55..bfb084dd47 100644
--- a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java
+++ b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java
@@ -16,6 +16,7 @@
package org.elasticsearch.common.inject.spi;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Binding;
@@ -40,7 +41,6 @@ import org.elasticsearch.common.inject.internal.PrivateElementsImpl;
import org.elasticsearch.common.inject.internal.ProviderMethodsModule;
import org.elasticsearch.common.inject.internal.SourceProvider;
import org.elasticsearch.common.inject.matcher.Matcher;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.lang.annotation.Annotation;
@@ -351,7 +351,7 @@ public final class Elements {
return builder;
}
- private static ESLogger logger = Loggers.getLogger(Elements.class);
+ private static Logger logger = Loggers.getLogger(Elements.class);
protected Object getSource() {
Object ret;
diff --git a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java
index 46175c0f66..943368059d 100644
--- a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java
+++ b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java
@@ -19,8 +19,8 @@
package org.elasticsearch.common.io;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.common.logging.ESLogger;
import java.io.BufferedReader;
import java.io.IOException;
@@ -101,7 +101,7 @@ public final class FileSystemUtils {
* Check that a directory exists, is a directory and is readable
* by the current user
*/
- public static boolean isAccessibleDirectory(Path directory, ESLogger logger) {
+ public static boolean isAccessibleDirectory(Path directory, Logger logger) {
assert directory != null && logger != null;
if (!Files.exists(directory)) {
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java
index 1a3f57052d..8fde972e8e 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java
@@ -25,10 +25,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import org.elasticsearch.plugins.Plugin;
/**
* A registry for {@link org.elasticsearch.common.io.stream.Writeable.Reader} readers of {@link NamedWriteable}.
@@ -47,7 +43,7 @@ public class NamedWriteableRegistry {
/** A name for the writeable which is unique to the {@link #categoryClass}. */
public final String name;
- /** A reader captability of reading*/
+ /** A reader capable of reading a value from the stream. */
public final Writeable.Reader<?> reader;
/** Creates a new entry which can be stored by the registry. */
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index 4c5f9757ca..794ed6f36f 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -431,27 +431,35 @@ public abstract class StreamInput extends InputStream {
return map;
}
- @Nullable
- @SuppressWarnings("unchecked")
- public Map<String, Object> readMap() throws IOException {
- return (Map<String, Object>) readGenericValue();
- }
-
/**
- * Read a map of strings to string lists.
+ * Read a {@link Map} of {@code K}-type keys to {@code V}-type {@link List}s.
+ * <pre><code>
+ * Map&lt;String, List&lt;String&gt;&gt; map = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
+ * </code></pre>
+ *
+ * @param keyReader The key reader
+ * @param valueReader The value reader
+ * @return Never {@code null}.
*/
- public Map<String, List<String>> readMapOfLists() throws IOException {
- int size = readVInt();
+ public <K, V> Map<K, List<V>> readMapOfLists(final Writeable.Reader<K> keyReader, final Writeable.Reader<V> valueReader)
+ throws IOException {
+ final int size = readVInt();
if (size == 0) {
return Collections.emptyMap();
}
- Map<String, List<String>> map = new HashMap<>(size);
+ final Map<K, List<V>> map = new HashMap<>(size);
for (int i = 0; i < size; ++i) {
- map.put(readString(), readList(StreamInput::readString));
+ map.put(keyReader.read(this), readList(valueReader));
}
return map;
}
+ @Nullable
+ @SuppressWarnings("unchecked")
+ public Map<String, Object> readMap() throws IOException {
+ return (Map<String, Object>) readGenericValue();
+ }
+
@SuppressWarnings({"unchecked"})
@Nullable
public Object readGenericValue() throws IOException {
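
The generic readMapOfLists now takes explicit key and value readers, as the javadoc example above shows. A round-trip sketch under the new signature, assuming BytesStreamOutput and BytesReference#streamInput as the buffer helpers:

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;
    import java.util.List;
    import java.util.Map;

    class ReadMapSketch {
        static Map<String, List<String>> roundTrip(Map<String, List<String>> map) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.writeMapOfLists(map, StreamOutput::writeString, StreamOutput::writeString);
                try (StreamInput in = out.bytes().streamInput()) {
                    return in.readMapOfLists(StreamInput::readString, StreamInput::readString);
                }
            }
        }
    }
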
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
index 24350936fa..0584d37960 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -32,6 +32,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.io.stream.Writeable.Writer;
import org.elasticsearch.common.text.Text;
import org.joda.time.DateTimeZone;
import org.joda.time.ReadableInstant;
@@ -51,6 +52,7 @@ import java.nio.file.NotDirectoryException;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -413,25 +415,52 @@ public abstract class StreamOutput extends OutputStream {
}
/**
- * Writes a map of strings to string lists.
+ * Write a map to the stream with a consistent key order, so that equal maps always
+ * produce the same byte sequence.
+ * This method is compatible with {@code StreamInput.readMap} and {@code StreamInput.readGenericValue}.
+ * Only the order of the top-level keys is normalized, not that of maps nested within the map.
*/
- public void writeMapOfLists(Map<String, List<String>> map) throws IOException {
+ public void writeMapWithConsistentOrder(@Nullable Map<String, ? extends Object> map)
+ throws IOException {
+ if (map == null) {
+ writeByte((byte) -1);
+ return;
+ }
+ assert false == (map instanceof LinkedHashMap);
+ this.writeByte((byte) 10);
+ this.writeVInt(map.size());
+ Iterator<? extends Map.Entry<String, ?>> iterator =
+ map.entrySet().stream().sorted((a, b) -> a.getKey().compareTo(b.getKey())).iterator();
+ while (iterator.hasNext()) {
+ Map.Entry<String, ?> next = iterator.next();
+ this.writeString(next.getKey());
+ this.writeGenericValue(next.getValue());
+ }
+ }
+
+ /**
+ * Write a {@link Map} of {@code K}-type keys to {@code V}-type {@link List}s.
+ * <pre><code>
+ * Map&lt;String, List&lt;String&gt;&gt; map = ...;
+ * out.writeMapOfLists(map, StreamOutput::writeString, StreamOutput::writeString);
+ * </code></pre>
+ *
+ * @param keyWriter The key writer
+ * @param valueWriter The value writer
+ */
+ public <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer<K> keyWriter, final Writer<V> valueWriter)
+ throws IOException {
writeVInt(map.size());
- for (Map.Entry<String, List<String>> entry : map.entrySet()) {
- writeString(entry.getKey());
+ for (final Map.Entry<K, List<V>> entry : map.entrySet()) {
+ keyWriter.write(this, entry.getKey());
writeVInt(entry.getValue().size());
- for (String v : entry.getValue()) {
- writeString(v);
+ for (final V value : entry.getValue()) {
+ valueWriter.write(this, value);
}
}
}
- @FunctionalInterface
- interface Writer {
- void write(StreamOutput o, Object value) throws IOException;
- }
-
private static final Map<Class<?>, Writer> WRITERS;
static {
@@ -549,6 +578,12 @@ public abstract class StreamOutput extends OutputStream {
WRITERS = Collections.unmodifiableMap(writers);
}
+ /**
+ * Notice: when serializing a map, the key-value order of the map being written and of the
+ * map later read back in may differ, so equal maps may produce different byte streams.
+ * To guarantee that a map always serializes to the same byte sequence,
+ * use {@code writeMapWithConsistentOrder}.
+ */
public void writeGenericValue(@Nullable Object value) throws IOException {
if (value == null) {
writeByte((byte) -1);
@@ -837,6 +872,16 @@ public abstract class StreamOutput extends OutputStream {
}
/**
+ * Writes a list of strings
+ */
+ public void writeStringList(List<String> list) throws IOException {
+ writeVInt(list.size());
+ for (String string: list) {
+ this.writeString(string);
+ }
+ }
+
+ /**
* Writes a list of {@link NamedWriteable} objects.
*/
public void writeNamedWriteableList(List<? extends NamedWriteable> list) throws IOException {
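
writeMapWithConsistentOrder sorts the top-level keys before writing, so two equal maps with different insertion histories serialize to identical bytes. A sketch of that property (again assuming BytesStreamOutput as the buffer and BytesReference equality by content):

    import org.elasticsearch.common.io.stream.BytesStreamOutput;

    import java.io.IOException;
    import java.util.Map;

    class ConsistentOrderSketch {
        // should return true for equal maps regardless of key insertion order;
        // note the method asserts the input is not a LinkedHashMap
        static boolean sameBytes(Map<String, Object> a, Map<String, Object> b) throws IOException {
            try (BytesStreamOutput out1 = new BytesStreamOutput();
                 BytesStreamOutput out2 = new BytesStreamOutput()) {
                out1.writeMapWithConsistentOrder(a);
                out2.writeMapWithConsistentOrder(b);
                return out1.bytes().equals(out2.bytes());
            }
        }
    }
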
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
index 16497533e2..30607f3375 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
@@ -25,26 +25,69 @@ import java.io.IOException;
* Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
* across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged.
- *
+ * <p>
* Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
* so this isn't always possible.
*/
public interface Writeable {
+
/**
* Write this into the {@linkplain StreamOutput}.
*/
- void writeTo(StreamOutput out) throws IOException;
+ void writeTo(final StreamOutput out) throws IOException;
+
+ /**
+ * Reference to a method that can write some object to a {@link StreamOutput}.
+ * <p>
+ * By convention this is a method from {@link StreamOutput} itself (e.g., {@link StreamOutput#writeString}). If the value can be
+ * {@code null}, then the "optional" variant of methods should be used!
+ * <p>
+ * Most classes should implement {@link Writeable} and the {@link Writeable#writeTo(StreamOutput)} method should <em>use</em>
+ * {@link StreamOutput} methods directly or this indirectly:
+ * <pre><code>
+ * public void writeTo(StreamOutput out) throws IOException {
+ * out.writeVInt(someValue);
+ * out.writeMapOfLists(someMap, StreamOutput::writeString, StreamOutput::writeString);
+ * }
+ * </code></pre>
+ */
+ @FunctionalInterface
+ interface Writer<V> {
+
+ /**
+ * Write {@code V}-type {@code value} to the {@code out}put stream.
+ *
+ * @param out Output to write the {@code value} to
+ * @param value The value to add
+ */
+ void write(final StreamOutput out, final V value) throws IOException;
+
+ }
/**
* Reference to a method that can read some object from a stream. By convention this is a constructor that takes
* {@linkplain StreamInput} as an argument for most classes and a static method for things like enums. Returning null from one of these
* is always wrong - for that we use methods like {@link StreamInput#readOptionalWriteable(Reader)}.
+ * <p>
+ * As most classes will implement this via a constructor (or a static method in the case of enumerations), it's something that should
+ * look like:
+ * <pre><code>
+ * public MyClass(final StreamInput in) throws IOException {
+ * this.someValue = in.readVInt();
+ * this.someMap = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
+ * }
+ * </code></pre>
*/
@FunctionalInterface
- interface Reader<R> {
+ interface Reader<V> {
+
/**
- * Read R from a stream.
+ * Read a {@code V}-type value from a stream.
+ *
+ * @param in Input to read the value from
*/
- R read(StreamInput in) throws IOException;
+ V read(final StreamInput in) throws IOException;
+
}
+
}
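
Putting the new Reader and Writer conventions together, an implementation following the javadoc above would look like this (MyClass and its fields are the javadoc's own illustrative names):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;
    import java.util.List;
    import java.util.Map;

    public class MyClass implements Writeable {
        private final int someValue;
        private final Map<String, List<String>> someMap;

        // the constructor doubles as the Writeable.Reader
        public MyClass(final StreamInput in) throws IOException {
            this.someValue = in.readVInt();
            this.someMap = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
        }

        @Override
        public void writeTo(final StreamOutput out) throws IOException {
            out.writeVInt(someValue);
            out.writeMapOfLists(someMap, StreamOutput::writeString, StreamOutput::writeString);
        }
    }
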
diff --git a/core/src/main/java/org/elasticsearch/common/logging/ConsoleAppender.java b/core/src/main/java/org/elasticsearch/common/logging/ConsoleAppender.java
deleted file mode 100644
index 7c33389974..0000000000
--- a/core/src/main/java/org/elasticsearch/common/logging/ConsoleAppender.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.logging;
-
-import org.apache.log4j.Layout;
-import org.apache.log4j.WriterAppender;
-import org.apache.log4j.helpers.LogLog;
-import org.elasticsearch.common.SuppressForbidden;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * ConsoleAppender appends log events to <code>System.out</code> or
- * <code>System.err</code> using a layout specified by the user. The
- * default target is <code>System.out</code>.
- * <p>Elasticsearch: Adapter from log4j to allow to disable console logging...</p>
- *
- * @author Ceki G&uuml;lc&uuml;
- * @author Curt Arnold
- * @since 1.1
- */
-public class ConsoleAppender extends WriterAppender {
-
- public static final String SYSTEM_OUT = "System.out";
- public static final String SYSTEM_ERR = "System.err";
-
- protected String target = SYSTEM_OUT;
-
- /**
- * Determines if the appender honors reassignments of System.out
- * or System.err made after configuration.
- */
- private boolean follow = true;
-
- /**
- * Constructs an unconfigured appender.
- */
- public ConsoleAppender() {
- }
-
- /**
- * Creates a configured appender.
- *
- * @param layout layout, may not be null.
- */
- public ConsoleAppender(Layout layout) {
- this(layout, SYSTEM_OUT);
- }
-
- /**
- * Creates a configured appender.
- *
- * @param layout layout, may not be null.
- * @param target target, either "System.err" or "System.out".
- */
- public ConsoleAppender(Layout layout, String target) {
- setLayout(layout);
- setTarget(target);
- activateOptions();
- }
-
- /**
- * Sets the value of the <b>Target</b> option. Recognized values
- * are "System.out" and "System.err". Any other value will be
- * ignored.
- */
- public void setTarget(String value) {
- String v = value.trim();
-
- if (SYSTEM_OUT.equalsIgnoreCase(v)) {
- target = SYSTEM_OUT;
- } else if (SYSTEM_ERR.equalsIgnoreCase(v)) {
- target = SYSTEM_ERR;
- } else {
- targetWarn(value);
- }
- }
-
- /**
- * Returns the current value of the <b>Target</b> property. The
- * default value of the option is "System.out".
- * <p>
- * See also {@link #setTarget}.
- */
- public String getTarget() {
- return target;
- }
-
- /**
- * Sets whether the appender honors reassignments of System.out
- * or System.err made after configuration.
- *
- * @param newValue if true, appender will use value of System.out or
- * System.err in force at the time when logging events are appended.
- * @since 1.2.13
- */
- public final void setFollow(final boolean newValue) {
- follow = newValue;
- }
-
- /**
- * Gets whether the appender honors reassignments of System.out
- * or System.err made after configuration.
- *
- * @return true if appender will use value of System.out or
- * System.err in force at the time when logging events are appended.
- * @since 1.2.13
- */
- public final boolean getFollow() {
- return follow;
- }
-
- void targetWarn(String val) {
- LogLog.warn("[" + val + "] should be System.out or System.err.");
- LogLog.warn("Using previously set target, System.out by default.");
- }
-
- /**
- * Prepares the appender for use.
- */
- @Override
- @SuppressForbidden(reason = "System#out")
- public void activateOptions() {
- if (follow) {
- if (target.equals(SYSTEM_ERR)) {
- setWriter(createWriter(new SystemErrStream()));
- } else {
- setWriter(createWriter(new SystemOutStream()));
- }
- } else {
- if (target.equals(SYSTEM_ERR)) {
- setWriter(createWriter(System.err));
- } else {
- setWriter(createWriter(System.out));
- }
- }
-
- super.activateOptions();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected
- final void closeWriter() {
- if (follow) {
- super.closeWriter();
- }
- }
-
-
- /**
- * An implementation of OutputStream that redirects to the
- * current System.err.
- */
- @SuppressForbidden(reason = "System#err")
- private static class SystemErrStream extends OutputStream {
- public SystemErrStream() {
- }
-
- @Override
- public void close() {
- }
-
- @Override
- public void flush() {
- System.err.flush();
- }
-
- @Override
- public void write(final byte[] b) throws IOException {
- if (!Loggers.consoleLoggingEnabled()) {
- return;
- }
- System.err.write(b);
- }
-
- @Override
-
- public void write(final byte[] b, final int off, final int len)
- throws IOException {
- if (!Loggers.consoleLoggingEnabled()) {
- return;
- }
- System.err.write(b, off, len);
- }
-
- @Override
- public void write(final int b) throws IOException {
- if (!Loggers.consoleLoggingEnabled()) {
- return;
- }
- System.err.write(b);
- }
- }
-
- /**
- * An implementation of OutputStream that redirects to the
- * current System.out.
- */
- @SuppressForbidden(reason = "System#err")
- private static class SystemOutStream extends OutputStream {
- public SystemOutStream() {
- }
-
- @Override
- public void close() {
- }
-
- @Override
- public void flush() {
- System.out.flush();
- }
-
- @Override
- public void write(final byte[] b) throws IOException {
- if (!Loggers.consoleLoggingEnabled()) {
- return;
- }
- System.out.write(b);
- }
-
- @Override
- public void write(final byte[] b, final int off, final int len)
- throws IOException {
- if (!Loggers.consoleLoggingEnabled()) {
- return;
- }
- System.out.write(b, off, len);
- }
-
- @Override
- public void write(final int b) throws IOException {
- if (!Loggers.consoleLoggingEnabled()) {
- return;
- }
- System.out.write(b);
- }
- }
-
-}
diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
index 5970f91732..d9b811585d 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
@@ -19,6 +19,9 @@
package org.elasticsearch.common.logging;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -31,6 +34,8 @@ import java.util.concurrent.CopyOnWriteArraySet;
*/
public class DeprecationLogger {
+ private final Logger logger;
+
/**
* The "Warning" Header comes from RFC-7234. As the RFC describes, it's generally used for caching purposes, but it can be
* used for <em>any</em> warning.
@@ -84,22 +89,20 @@ public class DeprecationLogger {
}
}
- private final ESLogger logger;
-
/**
* Creates a new deprecation logger based on the parent logger. Automatically
* prefixes the logger name with "deprecation", if it starts with "org.elasticsearch.",
* it replaces "org.elasticsearch" with "org.elasticsearch.deprecation" to maintain
* the "org.elasticsearch" namespace.
*/
- public DeprecationLogger(ESLogger parentLogger) {
+ public DeprecationLogger(Logger parentLogger) {
String name = parentLogger.getName();
if (name.startsWith("org.elasticsearch")) {
name = name.replace("org.elasticsearch.", "org.elasticsearch.deprecation.");
} else {
name = "deprecation." + name;
}
- this.logger = ESLoggerFactory.getLogger(parentLogger.getPrefix(), name);
+ this.logger = LogManager.getLogger(name, parentLogger.getMessageFactory());
}
/**
@@ -113,29 +116,27 @@ public class DeprecationLogger {
* Logs a deprecated message to the deprecation log, as well as to the local {@link ThreadContext}.
*
* @param threadContexts The node's {@link ThreadContext} (outside of concurrent tests, this should only ever have one context).
- * @param msg The deprecation message.
+ * @param message The deprecation message.
* @param params The parameters used to fill in the message, if any exist.
*/
@SuppressLoggerChecks(reason = "safely delegates to logger")
- void deprecated(Set<ThreadContext> threadContexts, String msg, Object... params) {
+ void deprecated(Set<ThreadContext> threadContexts, String message, Object... params) {
Iterator<ThreadContext> iterator = threadContexts.iterator();
if (iterator.hasNext()) {
- final String formattedMsg = LoggerMessageFormat.format(msg, params);
+ final String formattedMessage = LoggerMessageFormat.format(message, params);
while (iterator.hasNext()) {
try {
- iterator.next().addResponseHeader(DEPRECATION_HEADER, formattedMsg);
+ iterator.next().addResponseHeader(DEPRECATION_HEADER, formattedMessage);
} catch (IllegalStateException e) {
// ignored; it should be removed shortly
}
}
-
- logger.debug(formattedMsg);
+ logger.warn(formattedMessage);
} else {
- logger.debug(msg, params);
+ logger.warn(message, params);
}
-
}
}
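
DeprecationLogger now wraps a plain Log4j Logger and emits at WARN rather than DEBUG, keeping the "org.elasticsearch.deprecation." naming convention. A usage sketch (logger name and setting names are illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.elasticsearch.common.logging.DeprecationLogger;

    class DeprecationSketch {
        private static final DeprecationLogger deprecationLogger =
                new DeprecationLogger(LogManager.getLogger("org.elasticsearch.example"));

        static void warnDeprecated() {
            // logged at WARN under org.elasticsearch.deprecation.example
            deprecationLogger.deprecated("Setting [{}] is deprecated, use [{}] instead",
                    "old.setting", "new.setting");
        }
    }
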
diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLogger.java b/core/src/main/java/org/elasticsearch/common/logging/ESLogger.java
deleted file mode 100644
index b2a2aa333c..0000000000
--- a/core/src/main/java/org/elasticsearch/common/logging/ESLogger.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.logging;
-
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.elasticsearch.common.SuppressLoggerChecks;
-
-import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
-
-/**
- * Elasticsearch's logger wrapper.
- */
-@SuppressLoggerChecks(reason = "safely delegates to itself")
-public class ESLogger {
- private static final String FQCN = ESLogger.class.getName();
-
- private final String prefix;
- private final Logger logger;
-
- public ESLogger(String prefix, Logger logger) {
- this.prefix = prefix;
- this.logger = logger;
- }
-
- /**
- * The prefix of the log.
- */
- public String getPrefix() {
- return this.prefix;
- }
-
- /**
- * Fetch the underlying logger so we can look at it. Only exists for testing.
- */
- Logger getLogger() {
- return logger;
- }
-
- /**
- * Set the level of the logger. If the new level is null, the logger will inherit it's level from its nearest ancestor with a non-null
- * level.
- */
- public void setLevel(String level) {
- if (level == null) {
- logger.setLevel(null);
- } else if ("error".equalsIgnoreCase(level)) {
- logger.setLevel(Level.ERROR);
- } else if ("warn".equalsIgnoreCase(level)) {
- logger.setLevel(Level.WARN);
- } else if ("info".equalsIgnoreCase(level)) {
- logger.setLevel(Level.INFO);
- } else if ("debug".equalsIgnoreCase(level)) {
- logger.setLevel(Level.DEBUG);
- } else if ("trace".equalsIgnoreCase(level)) {
- logger.setLevel(Level.TRACE);
- }
- }
-
- /**
- * The level of this logger. If null then the logger is inheriting it's level from its nearest ancestor with a non-null level.
- */
- public String getLevel() {
- if (logger.getLevel() == null) {
- return null;
- }
- return logger.getLevel().toString();
- }
-
- /**
- * The name of this logger.
- */
- public String getName() {
- return logger.getName();
- }
-
- /**
- * Returns {@code true} if a TRACE level message should be logged.
- */
- public boolean isTraceEnabled() {
- return logger.isTraceEnabled();
- }
-
- /**
- * Returns {@code true} if a DEBUG level message should be logged.
- */
- public boolean isDebugEnabled() {
- return logger.isDebugEnabled();
- }
-
- /**
- * Returns {@code true} if an INFO level message should be logged.
- */
- public boolean isInfoEnabled() {
- return logger.isInfoEnabled();
- }
-
- /**
- * Returns {@code true} if a WARN level message should be logged.
- */
- public boolean isWarnEnabled() {
- return logger.isEnabledFor(Level.WARN);
- }
-
- /**
- * Returns {@code true} if an ERROR level message should be logged.
- */
- public boolean isErrorEnabled() {
- return logger.isEnabledFor(Level.ERROR);
- }
-
- /**
- * Logs a TRACE level message.
- */
- public void trace(String msg, Object... params) {
- trace(msg, null, params);
- }
-
- /**
- * Logs a TRACE level message with an exception.
- */
- public void trace(String msg, Throwable cause, Object... params) {
- if (isTraceEnabled()) {
- logger.log(FQCN, Level.TRACE, format(prefix, msg, params), cause);
- }
- }
-
- /**
- * Logs a DEBUG level message.
- */
- public void debug(String msg, Object... params) {
- debug(msg, null, params);
- }
-
- /**
- * Logs a DEBUG level message with an exception.
- */
- public void debug(String msg, Throwable cause, Object... params) {
- if (isDebugEnabled()) {
- logger.log(FQCN, Level.DEBUG, format(prefix, msg, params), cause);
- }
- }
-
- /**
- * Logs a INFO level message.
- */
- public void info(String msg, Object... params) {
- info(msg, null, params);
- }
-
- /**
- * Logs a INFO level message with an exception.
- */
- public void info(String msg, Throwable cause, Object... params) {
- if (isInfoEnabled()) {
- logger.log(FQCN, Level.INFO, format(prefix, msg, params), cause);
- }
- }
-
- /**
- * Logs a WARN level message.
- */
- public void warn(String msg, Object... params) {
- warn(msg, null, params);
- }
-
- /**
- * Logs a WARN level message with an exception.
- */
- public void warn(String msg, Throwable cause, Object... params) {
- if (isWarnEnabled()) {
- logger.log(FQCN, Level.WARN, format(prefix, msg, params), cause);
- }
- }
-
- /**
- * Logs a ERROR level message.
- */
- public void error(String msg, Object... params) {
- error(msg, null, params);
- }
-
- /**
- * Logs a ERROR level message with an exception.
- */
- public void error(String msg, Throwable cause, Object... params) {
- if (isErrorEnabled()) {
- logger.log(FQCN, Level.ERROR, format(prefix, msg, params), cause);
- }
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
index c0951c47df..548c1da5a8 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
@@ -19,53 +19,50 @@
package org.elasticsearch.common.logging;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.spi.ExtendedLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
-import java.util.Locale;
-
/**
- * Factory to get {@link ESLogger}s
+ * Factory to get {@link Logger}s
*/
-public abstract class ESLoggerFactory {
+public final class ESLoggerFactory {
- public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
- new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope);
- public static final Setting<LogLevel> LOG_LEVEL_SETTING =
- Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse,
- Property.Dynamic, Property.NodeScope);
+ private ESLoggerFactory() {
- public static ESLogger getLogger(String prefix, String name) {
- prefix = prefix == null ? null : prefix.intern();
- name = name.intern();
- return new ESLogger(prefix, Logger.getLogger(name));
}
- public static ESLogger getLogger(String name) {
- return getLogger(null, name);
+ public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
+ new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope);
+ public static final Setting<Level> LOG_LEVEL_SETTING =
+ Setting.prefixKeySetting("logger.", Level.INFO.name(), Level::valueOf,
+ Property.Dynamic, Property.NodeScope);
+
+ public static Logger getLogger(String prefix, String name) {
+ return getLogger(prefix, LogManager.getLogger(name));
}
- public static DeprecationLogger getDeprecationLogger(String name) {
- return new DeprecationLogger(getLogger(name));
+ public static Logger getLogger(String prefix, Class<?> clazz) {
+ return getLogger(prefix, LogManager.getLogger(clazz));
}
- public static DeprecationLogger getDeprecationLogger(String prefix, String name) {
- return new DeprecationLogger(getLogger(prefix, name));
+ public static Logger getLogger(String prefix, Logger logger) {
+ return new PrefixLogger((ExtendedLogger)logger, logger.getName(), prefix);
}
- public static ESLogger getRootLogger() {
- return new ESLogger(null, Logger.getRootLogger());
+ public static Logger getLogger(Class<?> clazz) {
+ return getLogger(null, clazz);
}
- private ESLoggerFactory() {
- // Utility class can't be built.
+ public static Logger getLogger(String name) {
+ return getLogger(null, name);
}
- public enum LogLevel {
- WARN, TRACE, INFO, DEBUG, ERROR;
- public static LogLevel parse(String level) {
- return valueOf(level.toUpperCase(Locale.ROOT));
- }
+ public static Logger getRootLogger() {
+ return LogManager.getRootLogger();
}
+
}
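
ESLoggerFactory is now a thin facade over LogManager; the prefix variants return a PrefixLogger that stamps each message with the given marker. A sketch (prefix and logger name are illustrative):

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.ESLoggerFactory;

    class FactorySketch {
        static void demo() {
            Logger plain = ESLoggerFactory.getLogger("org.elasticsearch.example");
            Logger prefixed = ESLoggerFactory.getLogger("[node-1]", "org.elasticsearch.example");
            prefixed.info("started");  // rendered with the [node-1] prefix
        }
    }
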
diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
index e203999d33..22d08202f9 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
@@ -19,12 +19,22 @@
package org.elasticsearch.common.logging;
-import org.apache.log4j.Java9Hack;
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.lucene.util.Constants;
-import org.elasticsearch.ElasticsearchException;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.AbstractConfiguration;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
+import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
+import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
+import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;
+import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration;
+import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory;
+import org.elasticsearch.cli.ExitCodes;
+import org.elasticsearch.cli.UserException;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
import java.io.IOException;
@@ -34,144 +44,101 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
-import java.util.Arrays;
+import java.util.ArrayList;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Properties;
+import java.util.Objects;
import java.util.Set;
-import static java.util.Collections.unmodifiableMap;
-import static org.elasticsearch.common.Strings.cleanPath;
-
-/**
- * Configures log4j with a special set of replacements.
- */
public class LogConfigurator {
- static final List<String> ALLOWED_SUFFIXES = Arrays.asList(".yml", ".yaml", ".json", ".properties");
-
- private static final Map<String, String> REPLACEMENTS;
- static {
- Map<String, String> replacements = new HashMap<>();
- // Appenders
- replacements.put("async", "org.apache.log4j.AsyncAppender");
- replacements.put("console", ConsoleAppender.class.getName());
- replacements.put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender");
- replacements.put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender");
- replacements.put("extrasRollingFile", "org.apache.log4j.rolling.RollingFileAppender");
- replacements.put("file", "org.apache.log4j.FileAppender");
- replacements.put("jdbc", "org.apache.log4j.jdbc.JDBCAppender");
- replacements.put("jms", "org.apache.log4j.net.JMSAppender");
- replacements.put("lf5", "org.apache.log4j.lf5.LF5Appender");
- replacements.put("ntevent", "org.apache.log4j.nt.NTEventLogAppender");
- replacements.put("null", "org.apache.log4j.NullAppender");
- replacements.put("rollingFile", "org.apache.log4j.RollingFileAppender");
- replacements.put("smtp", "org.apache.log4j.net.SMTPAppender");
- replacements.put("socket", "org.apache.log4j.net.SocketAppender");
- replacements.put("socketHub", "org.apache.log4j.net.SocketHubAppender");
- replacements.put("syslog", "org.apache.log4j.net.SyslogAppender");
- replacements.put("telnet", "org.apache.log4j.net.TelnetAppender");
- replacements.put("terminal", TerminalAppender.class.getName());
-
- // Policies
- replacements.put("timeBased", "org.apache.log4j.rolling.TimeBasedRollingPolicy");
- replacements.put("sizeBased", "org.apache.log4j.rolling.SizeBasedTriggeringPolicy");
-
- // Layouts
- replacements.put("simple", "org.apache.log4j.SimpleLayout");
- replacements.put("html", "org.apache.log4j.HTMLLayout");
- replacements.put("pattern", "org.apache.log4j.PatternLayout");
- replacements.put("consolePattern", "org.apache.log4j.PatternLayout");
- replacements.put("enhancedPattern", "org.apache.log4j.EnhancedPatternLayout");
- replacements.put("ttcc", "org.apache.log4j.TTCCLayout");
- replacements.put("xml", "org.apache.log4j.XMLLayout");
- REPLACEMENTS = unmodifiableMap(replacements);
-
- if (Constants.JRE_IS_MINIMUM_JAVA9) {
- Java9Hack.fixLog4j();
- }
+ /**
+ * Configure logging without reading a log4j2.properties file, effectively configuring the
+ * status logger and all loggers to the console.
+ *
+ * @param settings for configuring logger.level and individual loggers
+ */
+ public static void configureWithoutConfig(final Settings settings) {
+ Objects.requireNonNull(settings);
+ // we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
+ configureStatusLogger();
+ configureLoggerLevels(settings);
}
- private static boolean loaded;
-
/**
- * Consolidates settings and converts them into actual log4j settings, then initializes loggers and appenders.
- * @param settings custom settings that should be applied
- * @param resolveConfig controls whether the logging conf file should be read too or not.
+ * Configure logging reading from any log4j2.properties found in the config directory and its
+ * subdirectories from the specified environment. Will also configure logging to point to the
+ * logs directory from the specified environment.
+ *
+ * @param environment the environment for reading configs and the logs path
+ * @throws IOException if there is an issue reading any log4j2.properties in the config
+ * directory
+ * @throws UserException if there are no log4j2.properties in the specified configs path
*/
- public static void configure(Settings settings, boolean resolveConfig) {
- if (loaded) {
- return;
- }
- loaded = true;
- // TODO: this is partly a copy of InternalSettingsPreparer...we should pass in Environment and not do all this...
- Environment environment = new Environment(settings);
-
- Settings.Builder settingsBuilder = Settings.builder();
- if (resolveConfig) {
- resolveConfig(environment, settingsBuilder);
- }
+ public static void configure(final Environment environment) throws IOException, UserException {
+ Objects.requireNonNull(environment);
+ configure(environment.settings(), environment.configFile(), environment.logsFile());
+ }
- // add custom settings after config was added so that they are not overwritten by config
- settingsBuilder.put(settings);
- settingsBuilder.replacePropertyPlaceholders();
- Properties props = new Properties();
- for (Map.Entry<String, String> entry : settingsBuilder.build().getAsMap().entrySet()) {
- String key = "log4j." + entry.getKey();
- String value = entry.getValue();
- value = REPLACEMENTS.getOrDefault(value, value);
- if (key.endsWith(".value")) {
- props.setProperty(key.substring(0, key.length() - ".value".length()), value);
- } else if (key.endsWith(".type")) {
- props.setProperty(key.substring(0, key.length() - ".type".length()), value);
- } else {
- props.setProperty(key, value);
+ private static void configure(final Settings settings, final Path configsPath, final Path logsPath) throws IOException, UserException {
+ Objects.requireNonNull(settings);
+ Objects.requireNonNull(configsPath);
+ Objects.requireNonNull(logsPath);
+
+ setLogConfigurationSystemProperty(logsPath, settings);
+ // we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
+ configureStatusLogger();
+
+ final LoggerContext context = (LoggerContext) LogManager.getContext(false);
+
+ final List<AbstractConfiguration> configurations = new ArrayList<>();
+ final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory();
+ final Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
+ Files.walkFileTree(configsPath, options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ if (file.getFileName().toString().equals("log4j2.properties")) {
+ configurations.add((PropertiesConfiguration) factory.getConfiguration(file.toString(), file.toUri()));
+ }
+ return FileVisitResult.CONTINUE;
}
+ });
+
+ if (configurations.isEmpty()) {
+ throw new UserException(
+ ExitCodes.CONFIG,
+ "no log4j2.properties found; tried [" + configsPath + "] and its subdirectories");
}
- // ensure explicit path to logs dir exists
- props.setProperty("log4j.path.logs", cleanPath(environment.logsFile().toAbsolutePath().toString()));
- PropertyConfigurator.configure(props);
+
+ context.start(new CompositeConfiguration(configurations));
+
+ configureLoggerLevels(settings);
}
- /**
- * sets the loaded flag to false so that logging configuration can be
- * overridden. Should only be used in tests.
- */
- static void reset() {
- loaded = false;
+ private static void configureStatusLogger() {
+ final ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
+ builder.setStatusLevel(Level.ERROR);
+ Configurator.initialize(builder.build());
}
- static void resolveConfig(Environment env, final Settings.Builder settingsBuilder) {
-
- try {
- Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
- Files.walkFileTree(env.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
- @Override
- public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
- String fileName = file.getFileName().toString();
- if (fileName.startsWith("logging.")) {
- for (String allowedSuffix : ALLOWED_SUFFIXES) {
- if (fileName.endsWith(allowedSuffix)) {
- loadConfig(file, settingsBuilder);
- break;
- }
- }
- }
- return FileVisitResult.CONTINUE;
- }
- });
- } catch (IOException ioe) {
- throw new ElasticsearchException("Failed to load logging configuration", ioe);
+ private static void configureLoggerLevels(Settings settings) {
+ if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
+ final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings);
+ Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
}
- }
- static void loadConfig(Path file, Settings.Builder settingsBuilder) {
- try {
- settingsBuilder.loadFromPath(file);
- } catch (IOException | SettingsException | NoClassDefFoundError e) {
- // ignore
+ final Map<String, String> levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap();
+ for (String key : levels.keySet()) {
+ final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings);
+ Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level);
}
}
+
+
+ @SuppressForbidden(reason = "sets system property for logging configuration")
+ private static void setLogConfigurationSystemProperty(final Path logsPath, final Settings settings) {
+ System.setProperty("es.logs", logsPath.resolve(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()).toString());
+ }
+
}
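
Bootstrap code now hands the configurator an Environment instead of raw settings plus a resolve flag, and configuration fails fast if no log4j2.properties exists under the config path. A sketch of the new flow (paths are illustrative):

    import org.elasticsearch.cli.UserException;
    import org.elasticsearch.common.logging.LogConfigurator;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.env.Environment;

    import java.io.IOException;

    class LogBootstrapSketch {
        static void bootstrap() throws IOException, UserException {
            Settings settings = Settings.builder()
                    .put("path.home", "/usr/share/elasticsearch")
                    .build();
            // walks the config directory for log4j2.properties files and
            // throws UserException if none are found
            LogConfigurator.configure(new Environment(settings));
        }
    }
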
diff --git a/core/src/main/java/org/elasticsearch/common/logging/Loggers.java b/core/src/main/java/org/elasticsearch/common/logging/Loggers.java
index 4a938e38a2..812a0b70f2 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/Loggers.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/Loggers.java
@@ -19,19 +19,28 @@
package org.elasticsearch.common.logging;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.message.MessageFactory;
import org.elasticsearch.common.Classes;
-import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.Node;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import static java.util.Arrays.asList;
import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
/**
@@ -39,95 +48,68 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
*/
public class Loggers {
- private static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
-
public static final String SPACE = " ";
- private static boolean consoleLoggingEnabled = true;
-
- public static void disableConsoleLogging() {
- consoleLoggingEnabled = false;
- }
-
- public static void enableConsoleLogging() {
- consoleLoggingEnabled = true;
- }
-
- public static boolean consoleLoggingEnabled() {
- return consoleLoggingEnabled;
- }
-
- public static ESLogger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
+ public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
/**
- * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings,ShardId,String...)} but String loggerName instead of
+ * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId, String...)} but String loggerName instead of
* Class.
*/
- public static ESLogger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
+ public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(loggerName, settings,
- asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
+ asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
- public static ESLogger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
+ public static Logger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0]));
}
- public static ESLogger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
- return getLogger(buildClassLoggerName(clazz), settings, prefixes);
+ public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
+ final List<String> prefixesList = prefixesList(settings, prefixes);
+ return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
}
- @SuppressForbidden(reason = "using localhost for logging on which host it is is fine")
- private static InetAddress getHostAddress() {
- try {
- return InetAddress.getLocalHost();
- } catch (UnknownHostException e) {
- return null;
- }
+ public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
+ final List<String> prefixesList = prefixesList(settings, prefixes);
+ return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
}
- @SuppressForbidden(reason = "do not know what this method does")
- public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) {
+ private static List<String> prefixesList(Settings settings, String... prefixes) {
List<String> prefixesList = new ArrayList<>();
- if (settings.getAsBoolean("logger.logHostAddress", false)) {
- final InetAddress addr = getHostAddress();
- if (addr != null) {
- prefixesList.add(addr.getHostAddress());
- }
- }
- if (settings.getAsBoolean("logger.logHostName", false)) {
- final InetAddress addr = getHostAddress();
- if (addr != null) {
- prefixesList.add(addr.getHostName());
- }
- }
if (Node.NODE_NAME_SETTING.exists(settings)) {
prefixesList.add(Node.NODE_NAME_SETTING.get(settings));
}
if (prefixes != null && prefixes.length > 0) {
prefixesList.addAll(asList(prefixes));
}
- return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()]));
+ return prefixesList;
+ }
+
+ public static Logger getLogger(Logger parentLogger, String s) {
+ assert parentLogger instanceof PrefixLogger;
+ return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
}
- public static ESLogger getLogger(ESLogger parentLogger, String s) {
- return ESLoggerFactory.getLogger(parentLogger.getPrefix(), getLoggerName(parentLogger.getName() + s));
+ public static Logger getLogger(String s) {
+ return ESLoggerFactory.getLogger(s);
}
- public static ESLogger getLogger(String s) {
- return ESLoggerFactory.getLogger(getLoggerName(s));
+ public static Logger getLogger(Class<?> clazz) {
+ return ESLoggerFactory.getLogger(clazz);
}
- public static ESLogger getLogger(Class<?> clazz) {
- return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
+ public static Logger getLogger(Class<?> clazz, String... prefixes) {
+ return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
}
- public static ESLogger getLogger(Class<?> clazz, String... prefixes) {
- return getLogger(buildClassLoggerName(clazz), prefixes);
+ public static Logger getLogger(String name, String... prefixes) {
+ return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
}
- public static ESLogger getLogger(String name, String... prefixes) {
+ private static String formatPrefix(String... prefixes) {
String prefix = null;
if (prefixes != null && prefixes.length > 0) {
StringBuilder sb = new StringBuilder();
@@ -145,21 +127,78 @@ public class Loggers {
prefix = sb.toString();
}
}
- return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
+ return prefix;
}
- private static String buildClassLoggerName(Class<?> clazz) {
- String name = clazz.getName();
- if (name.startsWith("org.elasticsearch.")) {
- name = Classes.getPackageName(clazz);
+ /**
+ * Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null
+ * level.
+ */
+ public static void setLevel(Logger logger, String level) {
+ final Level l;
+ if (level == null) {
+ l = null;
+ } else {
+ l = Level.valueOf(level);
+ }
+ setLevel(logger, l);
+ }
+
+ public static void setLevel(Logger logger, Level level) {
+ if (!LogManager.ROOT_LOGGER_NAME.equals(logger.getName())) {
+ Configurator.setLevel(logger.getName(), level);
+ } else {
+ final LoggerContext ctx = LoggerContext.getContext(false);
+ final Configuration config = ctx.getConfiguration();
+ final LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
+ loggerConfig.setLevel(level);
+ ctx.updateLoggers();
+ }
+
+ // we have to descend the hierarchy
+ final LoggerContext ctx = LoggerContext.getContext(false);
+ for (final LoggerConfig loggerConfig : ctx.getConfiguration().getLoggers().values()) {
+ if (LogManager.ROOT_LOGGER_NAME.equals(logger.getName()) || loggerConfig.getName().startsWith(logger.getName() + ".")) {
+ Configurator.setLevel(loggerConfig.getName(), level);
+ }
+ }
+ }
+
+ public static void addAppender(final Logger logger, final Appender appender) {
+ final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+ final Configuration config = ctx.getConfiguration();
+ config.addAppender(appender);
+ LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
+ if (!logger.getName().equals(loggerConfig.getName())) {
+ loggerConfig = new LoggerConfig(logger.getName(), logger.getLevel(), true);
+ config.addLogger(logger.getName(), loggerConfig);
}
- return name;
+ loggerConfig.addAppender(appender, null, null);
+ ctx.updateLoggers();
}
- private static String getLoggerName(String name) {
- if (name.startsWith("org.elasticsearch.")) {
- name = name.substring("org.elasticsearch.".length());
+ public static void removeAppender(final Logger logger, final Appender appender) {
+ final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+ final Configuration config = ctx.getConfiguration();
+ LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
+ if (!logger.getName().equals(loggerConfig.getName())) {
+ loggerConfig = new LoggerConfig(logger.getName(), logger.getLevel(), true);
+ config.addLogger(logger.getName(), loggerConfig);
}
- return commonPrefix + name;
+ loggerConfig.removeAppender(appender.getName());
+ ctx.updateLoggers();
}
+
+ public static Appender findAppender(final Logger logger, final Class<? extends Appender> clazz) {
+ final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+ final Configuration config = ctx.getConfiguration();
+ final LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
+ for (final Map.Entry<String, Appender> entry : loggerConfig.getAppenders().entrySet()) {
+ if (entry.getValue().getClass().equals(clazz)) {
+ return entry.getValue();
+ }
+ }
+ return null;
+ }
+
}
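
A short usage sketch for the new level helpers; it assumes the Loggers and ESLoggerFactory classes from this commit are on the classpath, and the logger name is illustrative.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.ESLoggerFactory;
    import org.elasticsearch.common.logging.Loggers;

    public final class LevelSketch {
        public static void main(String[] args) {
            // Raising a logger also walks explicitly configured descendants,
            // so the whole subtree ends up at TRACE.
            Logger indexing = Loggers.getLogger("org.elasticsearch.index");
            Loggers.setLevel(indexing, Level.TRACE);

            // A null level clears the explicit setting so the logger inherits
            // from its nearest configured ancestor again.
            Loggers.setLevel(indexing, (String) null);

            // The root logger takes the LoggerConfig path because its name is
            // the reserved root name rather than a dotted package.
            Loggers.setLevel(ESLoggerFactory.getRootLogger(), Level.INFO);
        }
    }
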
diff --git a/core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java b/core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java
new file mode 100644
index 0000000000..32de2afde3
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.Marker;
+import org.apache.logging.log4j.MarkerManager;
+import org.apache.logging.log4j.message.Message;
+import org.apache.logging.log4j.spi.ExtendedLogger;
+import org.apache.logging.log4j.spi.ExtendedLoggerWrapper;
+
+import java.lang.ref.WeakReference;
+import java.util.WeakHashMap;
+
+class PrefixLogger extends ExtendedLoggerWrapper {
+
+ // we can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds
+ // a permanent reference to the marker; however, we have transient markers from index-level and
+ // shard-level components so this would effectively be a memory leak
+ private static final WeakHashMap<String, WeakReference<Marker>> markers = new WeakHashMap<>();
+
+ private final Marker marker;
+
+ public String prefix() {
+ return marker.getName();
+ }
+
+ PrefixLogger(final ExtendedLogger logger, final String name, final String prefix) {
+ super(logger, name, null);
+
+ final String actualPrefix = (prefix == null ? "" : prefix).intern();
+ final Marker actualMarker;
+ // markers is not thread-safe, so we synchronize access
+ synchronized (markers) {
+ final WeakReference<Marker> marker = markers.get(actualPrefix);
+ final Marker maybeMarker = marker == null ? null : marker.get();
+ if (maybeMarker == null) {
+ actualMarker = new MarkerManager.Log4jMarker(actualPrefix);
+ markers.put(actualPrefix, new WeakReference<>(actualMarker));
+ } else {
+ actualMarker = maybeMarker;
+ }
+ }
+ this.marker = actualMarker;
+ }
+
+ @Override
+ public void logMessage(final String fqcn, final Level level, final Marker marker, final Message message, final Throwable t) {
+ assert marker == null;
+ super.logMessage(fqcn, level, this.marker, message, t);
+ }
+
+}
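
The marker cache above avoids MarkerManager precisely because of lifetime: both keys and values must remain collectible. A JDK-only sketch of the same weak-interning pattern, with hypothetical names:

    import java.lang.ref.WeakReference;
    import java.util.WeakHashMap;
    import java.util.function.Function;

    // Values are held through WeakReferences so entries for transient keys
    // (such as per-shard prefixes) can be garbage collected instead of
    // accumulating, which is the leak the comment above warns about.
    final class WeakValueCache<V> {
        private final WeakHashMap<String, WeakReference<V>> cache = new WeakHashMap<>();

        // WeakHashMap is not thread-safe, so access is synchronized, as above.
        synchronized V computeIfAbsent(String key, Function<String, V> factory) {
            final WeakReference<V> ref = cache.get(key);
            V value = ref == null ? null : ref.get();
            if (value == null) {
                value = factory.apply(key);
                cache.put(key, new WeakReference<>(value));
            }
            return value;
        }
    }
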
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java b/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java
index 0000a138bd..680760444b 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java
@@ -19,25 +19,23 @@
package org.elasticsearch.common.lucene;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.InfoStream;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
/** An InfoStream (for Lucene's IndexWriter) that redirects
* messages to "lucene.iw.ifd" and "lucene.iw" Logger.trace. */
-
public final class LoggerInfoStream extends InfoStream {
- /** Used for component-specific logging: */
- /** Logger for everything */
- private final ESLogger logger;
+ private final Logger parentLogger;
- /** Logger for IndexFileDeleter */
- private final ESLogger ifdLogger;
+ private final Map<String, Logger> loggers = new ConcurrentHashMap<>();
- public LoggerInfoStream(ESLogger parentLogger) {
- logger = Loggers.getLogger(parentLogger, ".lucene.iw");
- ifdLogger = Loggers.getLogger(parentLogger, ".lucene.iw.ifd");
+ public LoggerInfoStream(final Logger parentLogger) {
+ this.parentLogger = parentLogger;
}
@Override
@@ -52,15 +50,13 @@ public final class LoggerInfoStream extends InfoStream {
return getLogger(component).isTraceEnabled() && component.equals("TP") == false;
}
- private ESLogger getLogger(String component) {
- if (component.equals("IFD")) {
- return ifdLogger;
- } else {
- return logger;
- }
+ private Logger getLogger(String component) {
+ return loggers.computeIfAbsent(component, c -> Loggers.getLogger(parentLogger, "." + c));
}
@Override
public void close() {
+
}
+
}
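
LoggerInfoStream now creates one child logger per Lucene component on demand. For contrast, a minimal InfoStream sketch that writes to stdout; it assumes only lucene-core and is not the implementation above.

    import org.apache.lucene.util.InfoStream;

    public final class StdoutInfoStream extends InfoStream {
        @Override
        public void message(String component, String message) {
            System.out.println("[" + component + "] " + message);
        }

        @Override
        public boolean isEnabled(String component) {
            // Mirror the filter above, which also excludes the "TP" component.
            return "TP".equals(component) == false;
        }

        @Override
        public void close() {
        }
    }
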
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
index 39e67ce645..ef570be1e7 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -19,11 +19,15 @@
package org.elasticsearch.common.lucene;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
@@ -67,7 +71,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -88,7 +91,7 @@ import java.util.Objects;
public class Lucene {
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
- public static final String LATEST_CODEC = "Lucene60";
+ public static final String LATEST_CODEC = "Lucene62";
static {
Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class);
@@ -104,14 +107,14 @@ public class Lucene {
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f);
- public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
+ public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) {
if (version == null) {
return defaultVersion;
}
try {
return Version.parse(version);
} catch (ParseException e) {
- logger.warn("no version match {}, default to {}", e, version, defaultVersion);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
return defaultVersion;
}
}
@@ -351,6 +354,8 @@ public class Lucene {
return new ScoreDoc(in.readVInt(), in.readFloat());
}
+ private static final Class<?> GEO_DISTANCE_SORT_TYPE_CLASS = LatLonDocValuesField.newDistanceSort("some_geo_field", 0, 0).getClass();
+
public static void writeTopDocs(StreamOutput out, TopDocs topDocs) throws IOException {
if (topDocs instanceof TopFieldDocs) {
out.writeBoolean(true);
@@ -361,6 +366,16 @@ public class Lucene {
out.writeVInt(topFieldDocs.fields.length);
for (SortField sortField : topFieldDocs.fields) {
+ if (sortField.getClass() == GEO_DISTANCE_SORT_TYPE_CLASS) {
+ // for geo sorting, we replace the SortField with a SortField that assumes a double field.
+ // this works since the SortField is only used for merging top docs
+ SortField newSortField = new SortField(sortField.getField(), SortField.Type.DOUBLE);
+ newSortField.setMissingValue(sortField.getMissingValue());
+ sortField = newSortField;
+ }
+ if (sortField.getClass() != SortField.class) {
+ throw new IllegalArgumentException("Cannot serialize SortField impl [" + sortField + "]");
+ }
if (sortField.getField() == null) {
out.writeBoolean(false);
} else {
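
The geo-sort special case above can be read as a normalization step: anything that is not a plain SortField must either be rewritten into one or rejected before serialization, since only the base class's state goes over the wire. Extracted into a standalone sketch (names are illustrative):

    import org.apache.lucene.search.SortField;

    public final class SortFieldNormalizer {
        static SortField normalize(SortField sortField, Class<?> geoDistanceSortClass) {
            if (sortField.getClass() == geoDistanceSortClass) {
                // Geo distances are doubles and the SortField is only used for
                // merging top docs, so a DOUBLE SortField behaves identically.
                SortField replacement = new SortField(sortField.getField(), SortField.Type.DOUBLE);
                replacement.setMissingValue(sortField.getMissingValue());
                return replacement;
            }
            if (sortField.getClass() != SortField.class) {
                throw new IllegalArgumentException("Cannot serialize SortField impl [" + sortField + "]");
            }
            return sortField;
        }
    }
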
diff --git a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
index e32129c938..7fd4cc6d2f 100644
--- a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
+++ b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
@@ -19,7 +19,7 @@
package org.elasticsearch.common.network;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@@ -36,7 +36,7 @@ import java.util.Locale;
*/
final class IfConfig {
- private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
+ private static final Logger logger = Loggers.getLogger(IfConfig.class);
private static final String INDENT = " ";
/** log interface configuration at debug level, if its enabled */
diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
index bb4a4bd3b3..60bb35d8f1 100644
--- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
+++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
@@ -28,33 +28,40 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandReg
import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.ExtensionPoint;
-import org.elasticsearch.http.HttpServer;
+import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.tasks.RawTaskStatus;
import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
-import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.TransportInterceptor;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.local.LocalTransport;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Supplier;
/**
* A module to handle registering and binding all network related classes.
*/
-public class NetworkModule extends AbstractModule {
+public final class NetworkModule {
public static final String TRANSPORT_TYPE_KEY = "transport.type";
- public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String LOCAL_TRANSPORT = "local";
public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";
@@ -65,58 +72,87 @@ public class NetworkModule extends AbstractModule {
public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
- public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
- Setting.simpleString(TRANSPORT_SERVICE_TYPE_KEY, Property.NodeScope);
public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);
- private final NetworkService networkService;
private final Settings settings;
private final boolean transportClient;
- private final AllocationCommandRegistry allocationCommandRegistry = new AllocationCommandRegistry();
- private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
- private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
- private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
- private final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();
+ private static final AllocationCommandRegistry allocationCommandRegistry = new AllocationCommandRegistry();
+ private static final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();
+ private final Map<String, Supplier<Transport>> transportFactories = new HashMap<>();
+ private final Map<String, Supplier<HttpServerTransport>> transportHttpFactories = new HashMap<>();
+ private final List<TransportInterceptor> transportInterceptors = new ArrayList<>();
+
+ static {
+ registerAllocationCommand(CancelAllocationCommand::new, CancelAllocationCommand::fromXContent,
+ CancelAllocationCommand.COMMAND_NAME_FIELD);
+ registerAllocationCommand(MoveAllocationCommand::new, MoveAllocationCommand::fromXContent,
+ MoveAllocationCommand.COMMAND_NAME_FIELD);
+ registerAllocationCommand(AllocateReplicaAllocationCommand::new, AllocateReplicaAllocationCommand::fromXContent,
+ AllocateReplicaAllocationCommand.COMMAND_NAME_FIELD);
+ registerAllocationCommand(AllocateEmptyPrimaryAllocationCommand::new, AllocateEmptyPrimaryAllocationCommand::fromXContent,
+ AllocateEmptyPrimaryAllocationCommand.COMMAND_NAME_FIELD);
+ registerAllocationCommand(AllocateStalePrimaryAllocationCommand::new, AllocateStalePrimaryAllocationCommand::fromXContent,
+ AllocateStalePrimaryAllocationCommand.COMMAND_NAME_FIELD);
+ namedWriteables.add(
+ new NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new));
+ namedWriteables.add(
+ new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new));
+ }
/**
* Creates a network module that custom networking classes can be plugged into.
- * @param networkService A constructed network service object to bind.
* @param settings The settings for the node
* @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
*/
- public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
- this.networkService = networkService;
+ public NetworkModule(Settings settings, boolean transportClient, List<NetworkPlugin> plugins, ThreadPool threadPool,
+ BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
this.settings = settings;
this.transportClient = transportClient;
- registerTransportService("default", TransportService.class);
- registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
- namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new));
- namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new));
- registerBuiltinAllocationCommands();
+ registerTransport(LOCAL_TRANSPORT, () -> new LocalTransport(settings, threadPool, namedWriteableRegistry, circuitBreakerService));
+ for (NetworkPlugin plugin : plugins) {
+ if (transportClient == false && HTTP_ENABLED.get(settings)) {
+ Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports(settings, threadPool, bigArrays,
+ circuitBreakerService, namedWriteableRegistry, networkService);
+ for (Map.Entry<String, Supplier<HttpServerTransport>> entry : httpTransportFactory.entrySet()) {
+ registerHttpTransport(entry.getKey(), entry.getValue());
+ }
+ }
+ Map<String, Supplier<Transport>> transportFactory = plugin.getTransports(settings, threadPool, bigArrays,
+ circuitBreakerService, namedWriteableRegistry, networkService);
+ for (Map.Entry<String, Supplier<Transport>> entry : transportFactory.entrySet()) {
+ registerTransport(entry.getKey(), entry.getValue());
+ }
+ List<TransportInterceptor> transportInterceptors = plugin.getTransportInterceptors();
+ for (TransportInterceptor interceptor : transportInterceptors) {
+ registerTransportInterceptor(interceptor);
+ }
+ }
}
public boolean isTransportClient() {
return transportClient;
}
- /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */
- public void registerTransportService(String name, Class<? extends TransportService> clazz) {
- transportServiceTypes.registerExtension(name, clazz);
- }
-
/** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
- public void registerTransport(String name, Class<? extends Transport> clazz) {
- transportTypes.registerExtension(name, clazz);
+ private void registerTransport(String key, Supplier<Transport> factory) {
+ if (transportFactories.putIfAbsent(key, factory) != null) {
+ throw new IllegalArgumentException("transport for name: " + key + " is already registered");
+ }
}
/** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */
// TODO: we need a name other than "http transport"; it is too easily confused with transportClient
- public void registerHttpTransport(String name, Class<? extends HttpServerTransport> clazz) {
+ private void registerHttpTransport(String key, Supplier<HttpServerTransport> factory) {
if (transportClient) {
- throw new IllegalArgumentException("Cannot register http transport " + clazz.getName() + " for transport client");
+ throw new IllegalArgumentException("Cannot register http transport " + key + " for transport client");
+ }
+ if (transportHttpFactories.putIfAbsent(key, factory) != null) {
+ throw new IllegalArgumentException("transport for name: " + key + " is already registered");
}
- httpTransportTypes.registerExtension(name, clazz);
}
/**
@@ -129,7 +165,7 @@ public class NetworkModule extends AbstractModule {
* @param commandName the names under which the command should be parsed. The {@link ParseField#getPreferredName()} is special because
* it is the name under which the command's reader is registered.
*/
- private <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser,
+ private static <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser,
ParseField commandName) {
allocationCommandRegistry.register(parser, commandName);
namedWriteables.add(new Entry(AllocationCommand.class, commandName.getPreferredName(), reader));
@@ -138,47 +174,83 @@ public class NetworkModule extends AbstractModule {
/**
* The registry of allocation command parsers.
*/
- public AllocationCommandRegistry getAllocationCommandRegistry() {
+ public static AllocationCommandRegistry getAllocationCommandRegistry() {
return allocationCommandRegistry;
}
- public List<Entry> getNamedWriteables() {
- return namedWriteables;
+ public static List<Entry> getNamedWriteables() {
+ return Collections.unmodifiableList(namedWriteables);
}
- @Override
- protected void configure() {
- bind(NetworkService.class).toInstance(networkService);
- transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, "default");
- transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
-
- if (transportClient == false) {
- if (HTTP_ENABLED.get(settings)) {
- bind(HttpServer.class).asEagerSingleton();
- httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_SETTING.getKey(), HTTP_DEFAULT_TYPE_SETTING.get(settings));
- } else {
- bind(HttpServer.class).toProvider(Providers.of(null));
- }
- // Bind the AllocationCommandRegistry so RestClusterRerouteAction can get it.
- bind(AllocationCommandRegistry.class).toInstance(allocationCommandRegistry);
+ public Supplier<HttpServerTransport> getHttpServerTransportSupplier() {
+ final String name;
+ if (HTTP_TYPE_SETTING.exists(settings)) {
+ name = HTTP_TYPE_SETTING.get(settings);
+ } else {
+ name = HTTP_DEFAULT_TYPE_SETTING.get(settings);
}
+ final Supplier<HttpServerTransport> factory = transportHttpFactories.get(name);
+ if (factory == null) {
+ throw new IllegalStateException("Unsupported http.type [" + name + "]");
+ }
+ return factory;
}
- private void registerBuiltinAllocationCommands() {
- registerAllocationCommand(CancelAllocationCommand::new, CancelAllocationCommand::fromXContent,
- CancelAllocationCommand.COMMAND_NAME_FIELD);
- registerAllocationCommand(MoveAllocationCommand::new, MoveAllocationCommand::fromXContent,
- MoveAllocationCommand.COMMAND_NAME_FIELD);
- registerAllocationCommand(AllocateReplicaAllocationCommand::new, AllocateReplicaAllocationCommand::fromXContent,
- AllocateReplicaAllocationCommand.COMMAND_NAME_FIELD);
- registerAllocationCommand(AllocateEmptyPrimaryAllocationCommand::new, AllocateEmptyPrimaryAllocationCommand::fromXContent,
- AllocateEmptyPrimaryAllocationCommand.COMMAND_NAME_FIELD);
- registerAllocationCommand(AllocateStalePrimaryAllocationCommand::new, AllocateStalePrimaryAllocationCommand::fromXContent,
- AllocateStalePrimaryAllocationCommand.COMMAND_NAME_FIELD);
+ public boolean isHttpEnabled() {
+ return transportClient == false && HTTP_ENABLED.get(settings);
+ }
+ public Supplier<Transport> getTransportSupplier() {
+ final String name;
+ if (TRANSPORT_TYPE_SETTING.exists(settings)) {
+ name = TRANSPORT_TYPE_SETTING.get(settings);
+ } else {
+ name = TRANSPORT_DEFAULT_TYPE_SETTING.get(settings);
+ }
+ final Supplier<Transport> factory = transportFactories.get(name);
+ if (factory == null) {
+ throw new IllegalStateException("Unsupported transport.type [" + name + "]");
+ }
+ return factory;
}
- public boolean canRegisterHttpExtensions() {
- return transportClient == false;
+ /**
+ * Registers a new {@link TransportInterceptor}
+ */
+ private void registerTransportInterceptor(TransportInterceptor interceptor) {
+ this.transportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null"));
}
+
+ /**
+ * Returns a composite {@link TransportInterceptor} containing all registered interceptors
+ * @see #registerTransportInterceptor(TransportInterceptor)
+ */
+ public TransportInterceptor getTransportInterceptor() {
+ return new CompositeTransportInterceptor(this.transportInterceptors);
+ }
+
+ static final class CompositeTransportInterceptor implements TransportInterceptor {
+ final List<TransportInterceptor> transportInterceptors;
+
+ private CompositeTransportInterceptor(List<TransportInterceptor> transportInterceptors) {
+ this.transportInterceptors = new ArrayList<>(transportInterceptors);
+ }
+
+ @Override
+ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, TransportRequestHandler<T> actualHandler) {
+ for (TransportInterceptor interceptor : this.transportInterceptors) {
+ actualHandler = interceptor.interceptHandler(action, actualHandler);
+ }
+ return actualHandler;
+ }
+
+ @Override
+ public AsyncSender interceptSender(AsyncSender sender) {
+ for (TransportInterceptor interceptor : this.transportInterceptors) {
+ sender = interceptor.interceptSender(sender);
+ }
+ return sender;
+ }
+ }
+
}
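
With the Guice bindings gone, NetworkModule reduces to named factories chosen by a setting, with a default as fallback. A self-contained sketch of that selection pattern; the names are illustrative, not the module's API.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    public final class FactoryRegistry<T> {
        private final Map<String, Supplier<T>> factories = new HashMap<>();

        void register(String key, Supplier<T> factory) {
            // Mirror registerTransport above: double registration is an error.
            if (factories.putIfAbsent(key, factory) != null) {
                throw new IllegalArgumentException("factory for name: " + key + " is already registered");
            }
        }

        Supplier<T> select(String configured, String defaultName) {
            final String name = configured != null ? configured : defaultName;
            final Supplier<T> factory = factories.get(name);
            if (factory == null) {
                throw new IllegalStateException("Unsupported type [" + name + "]");
            }
            return factory;
        }

        public static void main(String[] args) {
            FactoryRegistry<Runnable> registry = new FactoryRegistry<>();
            registry.register("local", () -> () -> System.out.println("local transport"));
            registry.select(null, "local").get().run();
        }
    }
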
diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java b/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java
index 8652d4c5c0..9e06c39b83 100644
--- a/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java
+++ b/core/src/main/java/org/elasticsearch/common/network/NetworkUtils.java
@@ -32,6 +32,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
+import java.util.Optional;
/**
* Utilities for network interfaces / addresses binding and publishing.
@@ -227,14 +228,15 @@ public abstract class NetworkUtils {
/** Returns addresses for the given interface (it must be marked up) */
static InetAddress[] getAddressesForInterface(String name) throws SocketException {
- NetworkInterface intf = NetworkInterface.getByName(name);
- if (intf == null) {
+ Optional<NetworkInterface> networkInterface = getInterfaces().stream().filter((netIf) -> name.equals(netIf.getName())).findFirst();
+
+ if (networkInterface.isPresent() == false) {
throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces());
}
- if (!intf.isUp()) {
+ if (!networkInterface.get().isUp()) {
throw new IllegalArgumentException("Interface '" + name + "' is not up and running");
}
- List<InetAddress> list = Collections.list(intf.getInetAddresses());
+ List<InetAddress> list = Collections.list(networkInterface.get().getInetAddresses());
if (list.isEmpty()) {
throw new IllegalArgumentException("Interface '" + name + "' has no internet addresses");
}
diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
index 152a5629dd..9a39286009 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -19,8 +19,9 @@
package org.elasticsearch.common.settings;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.spell.LevensteinDistance;
-import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.collect.Tuple;
@@ -35,7 +36,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
-import java.util.SortedSet;
import java.util.TreeMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
@@ -129,7 +129,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
settingUpdater.getValue(current, previous);
} catch (RuntimeException ex) {
exceptions.add(ex);
- logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
}
}
// here we are exhaustive and record all settings that failed.
@@ -157,7 +157,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
try {
applyRunnables.add(settingUpdater.updater(current, previous));
} catch (Exception ex) {
- logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
throw ex;
}
}
@@ -374,6 +375,11 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
builder.put(setting.getKey(), setting.getRaw(defaultSettings));
}
}
+ for (Setting<?> setting : complexMatchers.values()) {
+ if (setting.exists(source) == false) {
+ builder.put(setting.getKey(), setting.getRaw(defaultSettings));
+ }
+ }
return builder.build();
}
@@ -521,7 +527,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
}
} catch (IllegalArgumentException ex) {
changed = true;
- logger.warn("found invalid setting: {} value: {} - archiving",ex , entry.getKey(), entry.getValue());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "found invalid setting: {} value: {} - archiving", entry.getKey(), entry.getValue()), ex);
/*
* We put them back in here such that tools can check from the outside if there are any indices with broken settings. The
* settings can remain there but we want users to be aware that some of their settings are broken so they can research why
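
Many call sites in this diff move the exception out of the format arguments and wrap the message in a Supplier, so the ParameterizedMessage is only built when the level is enabled. A minimal sketch of the idiom, assuming only the log4j-api dependency; the updater value is illustrative.

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    public final class LazyLoggingSketch {
        private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

        public static void main(String[] args) {
            final Exception ex = new IllegalStateException("boom");
            final Object settingUpdater = "updater for [cluster.routing]"; // illustrative
            // The trailing Throwable is treated as the exception, not as a
            // format parameter; the Supplier defers message construction.
            logger.debug(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "failed to prepareCommit settings for [{}]", settingUpdater), ex);
        }
    }
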
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 3ce483d0fa..b5a0564174 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -39,11 +39,13 @@ import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllo
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting.Property;
@@ -52,8 +54,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.env.Environment;
@@ -131,7 +133,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
if (ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) {
builder.putNull(key);
} else {
- builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).name());
+ builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings));
}
}
}
@@ -143,12 +145,18 @@ public final class ClusterSettings extends AbstractScopedSettings {
for (String key : value.getAsMap().keySet()) {
assert loggerPredicate.test(key);
String component = key.substring("logger.".length());
+ if ("level".equals(component)) {
+ continue;
+ }
if ("_root".equals(component)) {
final String rootLevel = value.get(key);
- ESLoggerFactory.getRootLogger().setLevel(rootLevel == null ? ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings)
- .name() : rootLevel);
+ if (rootLevel == null) {
+ Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
+ } else {
+ Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel);
+ }
} else {
- ESLoggerFactory.getLogger(component).setLevel(value.get(key));
+ Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key));
}
}
}
@@ -198,6 +206,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
+ SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
@@ -217,7 +226,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
NetworkModule.HTTP_DEFAULT_TYPE_SETTING,
NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING,
NetworkModule.HTTP_TYPE_SETTING,
- NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
NetworkModule.TRANSPORT_TYPE_SETTING,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,
@@ -389,7 +397,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
BootstrapSettings.MEMORY_LOCK_SETTING,
BootstrapSettings.SECCOMP_SETTING,
BootstrapSettings.CTRLHANDLER_SETTING,
- BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS,
IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING,
IndexingMemoryController.MIN_INDEX_BUFFER_SIZE_SETTING,
IndexingMemoryController.MAX_INDEX_BUFFER_SIZE_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index 370a6c0766..1cc4f747a9 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -140,6 +140,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
EngineConfig.INDEX_CODEC_SETTING,
+ EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS,
IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS,
// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", (s) -> {
@@ -185,6 +186,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
case IndexMetaData.SETTING_INDEX_UUID:
case IndexMetaData.SETTING_VERSION_CREATED:
case IndexMetaData.SETTING_VERSION_UPGRADED:
+ case IndexMetaData.SETTING_INDEX_PROVIDED_NAME:
case MergePolicyConfig.INDEX_MERGE_ENABLED:
return true;
default:
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
index 920e1d7f5f..a96b47762d 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.common.settings;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
@@ -26,7 +27,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -107,9 +107,6 @@ public class Setting<T> extends ToXContentToBytes {
IndexScope
}
- private static final ESLogger logger = Loggers.getLogger(Setting.class);
- private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
-
private final Key key;
protected final Function<Settings, String> defaultValue;
@Nullable
@@ -322,6 +319,7 @@ public class Setting<T> extends ToXContentToBytes {
// They're using the setting, so we need to tell them to stop
if (this.isDeprecated() && this.exists(settings)) {
// It would be convenient to show its replacement key, but replacement is often not so simple
+ final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(getClass()));
deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " +
"See the breaking changes lists in the documentation for details", getKey());
}
@@ -376,7 +374,7 @@ public class Setting<T> extends ToXContentToBytes {
/**
* Build a new updater with a noop validator.
*/
- final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger) {
+ final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, Logger logger) {
return newUpdater(consumer, logger, (s) -> {});
}
@@ -384,7 +382,7 @@ public class Setting<T> extends ToXContentToBytes {
* Build the updater responsible for validating new values, logging the new
* value, and eventually setting the value where it belongs.
*/
- AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
+ AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, Logger logger, Consumer<T> validator) {
if (isDynamic()) {
return new Updater(consumer, logger, validator);
} else {
@@ -397,7 +395,7 @@ public class Setting<T> extends ToXContentToBytes {
* and its usage for details.
*/
static <A, B> AbstractScopedSettings.SettingUpdater<Tuple<A, B>> compoundUpdater(final BiConsumer<A, B> consumer,
- final Setting<A> aSetting, final Setting<B> bSetting, ESLogger logger) {
+ final Setting<A> aSetting, final Setting<B> bSetting, Logger logger) {
final AbstractScopedSettings.SettingUpdater<A> aSettingUpdater = aSetting.newUpdater(null, logger);
final AbstractScopedSettings.SettingUpdater<B> bSettingUpdater = bSetting.newUpdater(null, logger);
return new AbstractScopedSettings.SettingUpdater<Tuple<A, B>>() {
@@ -413,6 +411,12 @@ public class Setting<T> extends ToXContentToBytes {
@Override
public void apply(Tuple<A, B> value, Settings current, Settings previous) {
+ if (aSettingUpdater.hasChanged(current, previous)) {
+ logger.info("updating [{}] from [{}] to [{}]", aSetting.key, aSetting.getRaw(previous), aSetting.getRaw(current));
+ }
+ if (bSettingUpdater.hasChanged(current, previous)) {
+ logger.info("updating [{}] from [{}] to [{}]", bSetting.key, bSetting.getRaw(previous), bSetting.getRaw(current));
+ }
consumer.accept(value.v1(), value.v2());
}
@@ -426,10 +430,10 @@ public class Setting<T> extends ToXContentToBytes {
private final class Updater implements AbstractScopedSettings.SettingUpdater<T> {
private final Consumer<T> consumer;
- private final ESLogger logger;
+ private final Logger logger;
private final Consumer<T> accept;
- public Updater(Consumer<T> consumer, ESLogger logger, Consumer<T> accept) {
+ public Updater(Consumer<T> consumer, Logger logger, Consumer<T> accept) {
this.consumer = consumer;
this.logger = logger;
this.accept = accept;
@@ -553,10 +557,6 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties);
}
- public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, Property... properties) {
- return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
- }
-
public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, Property... properties) {
return byteSizeSetting(key, (s) -> value.toString(), properties);
}
@@ -584,17 +584,56 @@ public class Setting<T> extends ToXContentToBytes {
public static ByteSizeValue parseByteSize(String s, ByteSizeValue minValue, ByteSizeValue maxValue, String key) {
ByteSizeValue value = ByteSizeValue.parseBytesSizeValue(s, key);
- if (value.bytes() < minValue.bytes()) {
+ if (value.getBytes() < minValue.getBytes()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
- if (value.bytes() > maxValue.bytes()) {
+ if (value.getBytes() > maxValue.getBytes()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
}
return value;
}
- public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
- return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
+ /**
+ * Creates a setting which specifies a memory size. This can either be
+ * specified as an absolute bytes value or as a percentage of the heap
+ * memory.
+ *
+ * @param key the key for the setting
+ * @param defaultValue the default value for this setting
+ * @param properties the properties for this setting, such as scope or filtering
+ * @return the setting object
+ */
+ public static Setting<ByteSizeValue> memorySizeSetting(String key, ByteSizeValue defaultValue, Property... properties) {
+ return memorySizeSetting(key, (s) -> defaultValue.toString(), properties);
+ }
+
+ /**
+ * Creates a setting which specifies a memory size. This can either be
+ * specified as an absolute bytes value or as a percentage of the heap
+ * memory.
+ *
+ * @param key the key for the setting
+ * @param defaultValue a function that supplies the default value for this setting
+ * @param properties the properties for this setting, such as scope or filtering
+ * @return the setting object
+ */
+ public static Setting<ByteSizeValue> memorySizeSetting(String key, Function<Settings, String> defaultValue, Property... properties) {
+ return new Setting<>(key, defaultValue, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
+ }
+
+ /**
+ * Creates a setting which specifies a memory size. This can either be
+ * specified as an absolute bytes value or as a percentage of the heap
+ * memory.
+ *
+ * @param key the key for the setting
+ * @param defaultPercentage the default value of this setting as a percentage of the heap memory
+ * @param properties the properties for this setting, such as scope or filtering
+ * @return the setting object
+ */
+ public static Setting<ByteSizeValue> memorySizeSetting(String key, String defaultPercentage, Property... properties) {
+ return new Setting<>(key, (s) -> defaultPercentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
}
public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser,
@@ -709,7 +748,7 @@ public class Setting<T> extends ToXContentToBytes {
}
@Override
- public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, ESLogger logger,
+ public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, Logger logger,
Consumer<Settings> validator) {
if (isDynamic() == false) {
throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
@@ -752,9 +791,9 @@ public class Setting<T> extends ToXContentToBytes {
};
}
- public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue,
+ public static Setting<TimeValue> timeSetting(String key, Function<Settings, TimeValue> defaultValue, TimeValue minValue,
Property... properties) {
- return new Setting<>(key, defaultValue, (s) -> {
+ return new Setting<>(key, (s) -> defaultValue.apply(s).getStringRep(), (s) -> {
TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
if (timeValue.millis() < minValue.millis()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
@@ -764,17 +803,21 @@ public class Setting<T> extends ToXContentToBytes {
}
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) {
- return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, properties);
+ return timeSetting(key, (s) -> defaultValue, minValue, properties);
}
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, Property... properties) {
- return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), properties);
+ return new Setting<>(key, (s) -> defaultValue.getStringRep(), (s) -> TimeValue.parseTimeValue(s, key), properties);
}
public static Setting<TimeValue> timeSetting(String key, Setting<TimeValue> fallbackSetting, Property... properties) {
return new Setting<>(key, fallbackSetting, (s) -> TimeValue.parseTimeValue(s, key), properties);
}
+ public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
+ return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
+ }
+
public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, Property... properties) {
return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
final double d = Double.parseDouble(s);
@@ -833,7 +876,7 @@ public class Setting<T> extends ToXContentToBytes {
}
@Override
- AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
+ AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, Logger logger, Consumer<T> validator) {
throw new UnsupportedOperationException("Affix settings can't be updated. Use #getConcreteSetting for updating.");
}
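
A usage sketch for the new memorySizeSetting factories, assuming the Setting and Settings classes in this repository; the setting key is hypothetical.

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.ByteSizeValue;

    public final class MemorySettingSketch {
        // Defaults to 10% of the heap unless overridden.
        static final Setting<ByteSizeValue> BUFFER =
            Setting.memorySizeSetting("demo.buffer.size", "10%", Property.NodeScope);

        public static void main(String[] args) {
            // An absolute override parses as a plain byte size.
            ByteSizeValue absolute = BUFFER.get(Settings.builder().put("demo.buffer.size", "256mb").build());
            // The default resolves against the heap at parse time.
            ByteSizeValue fromHeap = BUFFER.get(Settings.EMPTY);
            System.out.println(absolute + " / " + fromHeap);
        }
    }
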
diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
index 5fd19c4fc1..60276ce14f 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
@@ -19,9 +19,9 @@
package org.elasticsearch.common.settings;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -51,7 +51,7 @@ public class SettingsModule implements Module {
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.")
&& TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
- private final ESLogger logger;
+ private final Logger logger;
private final IndexScopedSettings indexScopedSettings;
private final ClusterSettings clusterSettings;
diff --git a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java
index 4a159957d5..7a412aac09 100644
--- a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java
+++ b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java
@@ -19,16 +19,20 @@
package org.elasticsearch.common.unit;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+
/**
* A <tt>SizeUnit</tt> represents size at a given unit of
* granularity and provides utility methods to convert across units.
* A <tt>SizeUnit</tt> does not maintain size information, but only
* helps organize and use size representations that may be maintained
* separately across various contexts.
- *
- *
*/
-public enum ByteSizeUnit {
+public enum ByteSizeUnit implements Writeable {
BYTES {
@Override
public long toBytes(long size) {
@@ -225,6 +229,13 @@ public enum ByteSizeUnit {
static final long MAX = Long.MAX_VALUE;
+ public static ByteSizeUnit fromId(int id) {
+ if (id < 0 || id >= values().length) {
+ throw new IllegalArgumentException("No byte size unit found for id [" + id + "]");
+ }
+ return values()[id];
+ }
+
/**
* Scale d by m, checking for overflow.
* This has a short name to make above code more readable.
@@ -235,7 +246,6 @@ public enum ByteSizeUnit {
return d * m;
}
-
public abstract long toBytes(long size);
public abstract long toKB(long size);
@@ -247,4 +257,16 @@ public enum ByteSizeUnit {
public abstract long toTB(long size);
public abstract long toPB(long size);
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(this.ordinal());
+ }
+
+ /**
+ * Reads a {@link ByteSizeUnit} from a given {@link StreamInput}
+ */
+ public static ByteSizeUnit readFrom(StreamInput in) throws IOException {
+ return ByteSizeUnit.fromId(in.readVInt());
+ }
} \ No newline at end of file
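ByteSizeUnit now serializes itself by writing its ordinal as a vInt, with fromId guarding against out-of-range ids on the read side. A minimal round-trip sketch (BytesStreamOutput is used only as a convenient in-memory stream):

    BytesStreamOutput out = new BytesStreamOutput();
    ByteSizeUnit.GB.writeTo(out);                      // writes ordinal 3 as a vInt
    ByteSizeUnit unit = ByteSizeUnit.readFrom(out.bytes().streamInput());
    assert unit == ByteSizeUnit.GB;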
diff --git a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java
index 32df65850a..7d2be6fee3 100644
--- a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java
+++ b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java
@@ -23,146 +23,107 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
-public class ByteSizeValue implements Streamable {
+public class ByteSizeValue implements Writeable {
- private long size;
+ private final long size;
+ private final ByteSizeUnit unit;
- private ByteSizeUnit sizeUnit;
-
- private ByteSizeValue() {
+ public ByteSizeValue(StreamInput in) throws IOException {
+ size = in.readVLong();
+ unit = ByteSizeUnit.BYTES;
+ }
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(getBytes());
}
public ByteSizeValue(long bytes) {
this(bytes, ByteSizeUnit.BYTES);
}
- public ByteSizeValue(long size, ByteSizeUnit sizeUnit) {
+ public ByteSizeValue(long size, ByteSizeUnit unit) {
this.size = size;
- this.sizeUnit = sizeUnit;
+ this.unit = unit;
}
public int bytesAsInt() {
- long bytes = bytes();
+ long bytes = getBytes();
if (bytes > Integer.MAX_VALUE) {
throw new IllegalArgumentException("size [" + toString() + "] is bigger than max int");
}
return (int) bytes;
}
- public long bytes() {
- return sizeUnit.toBytes(size);
- }
-
public long getBytes() {
- return bytes();
- }
-
- public long kb() {
- return sizeUnit.toKB(size);
+ return unit.toBytes(size);
}
public long getKb() {
- return kb();
- }
-
- public long mb() {
- return sizeUnit.toMB(size);
+ return unit.toKB(size);
}
public long getMb() {
- return mb();
- }
-
- public long gb() {
- return sizeUnit.toGB(size);
+ return unit.toMB(size);
}
public long getGb() {
- return gb();
- }
-
- public long tb() {
- return sizeUnit.toTB(size);
+ return unit.toGB(size);
}
public long getTb() {
- return tb();
- }
-
- public long pb() {
- return sizeUnit.toPB(size);
+ return unit.toTB(size);
}
public long getPb() {
- return pb();
- }
-
- public double kbFrac() {
- return ((double) bytes()) / ByteSizeUnit.C1;
+ return unit.toPB(size);
}
public double getKbFrac() {
- return kbFrac();
- }
-
- public double mbFrac() {
- return ((double) bytes()) / ByteSizeUnit.C2;
+ return ((double) getBytes()) / ByteSizeUnit.C1;
}
public double getMbFrac() {
- return mbFrac();
- }
-
- public double gbFrac() {
- return ((double) bytes()) / ByteSizeUnit.C3;
+ return ((double) getBytes()) / ByteSizeUnit.C2;
}
public double getGbFrac() {
- return gbFrac();
- }
-
- public double tbFrac() {
- return ((double) bytes()) / ByteSizeUnit.C4;
+ return ((double) getBytes()) / ByteSizeUnit.C3;
}
public double getTbFrac() {
- return tbFrac();
- }
-
- public double pbFrac() {
- return ((double) bytes()) / ByteSizeUnit.C5;
+ return ((double) getBytes()) / ByteSizeUnit.C4;
}
public double getPbFrac() {
- return pbFrac();
+ return ((double) getBytes()) / ByteSizeUnit.C5;
}
@Override
public String toString() {
- long bytes = bytes();
+ long bytes = getBytes();
double value = bytes;
String suffix = "b";
if (bytes >= ByteSizeUnit.C5) {
- value = pbFrac();
+ value = getPbFrac();
suffix = "pb";
} else if (bytes >= ByteSizeUnit.C4) {
- value = tbFrac();
+ value = getTbFrac();
suffix = "tb";
} else if (bytes >= ByteSizeUnit.C3) {
- value = gbFrac();
+ value = getGbFrac();
suffix = "gb";
} else if (bytes >= ByteSizeUnit.C2) {
- value = mbFrac();
+ value = getMbFrac();
suffix = "mb";
} else if (bytes >= ByteSizeUnit.C1) {
- value = kbFrac();
+ value = getKbFrac();
suffix = "kb";
}
return Strings.format1Decimals(value, suffix);
@@ -172,7 +133,8 @@ public class ByteSizeValue implements Streamable {
return parseBytesSizeValue(sValue, null, settingName);
}
- public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue, String settingName) throws ElasticsearchParseException {
+ public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue, String settingName)
+ throws ElasticsearchParseException {
settingName = Objects.requireNonNull(settingName);
if (sValue == null) {
return defaultValue;
@@ -210,7 +172,9 @@ public class ByteSizeValue implements Streamable {
bytes = 0;
} else {
// Missing units:
- throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}] as a size in bytes: unit is missing or unrecognized", settingName, sValue);
+ throw new ElasticsearchParseException(
+ "failed to parse setting [{}] with value [{}] as a size in bytes: unit is missing or unrecognized",
+ settingName, sValue);
}
} catch (NumberFormatException e) {
throw new ElasticsearchParseException("failed to parse [{}]", e, sValue);
@@ -218,23 +182,6 @@ public class ByteSizeValue implements Streamable {
return new ByteSizeValue(bytes, ByteSizeUnit.BYTES);
}
- public static ByteSizeValue readBytesSizeValue(StreamInput in) throws IOException {
- ByteSizeValue sizeValue = new ByteSizeValue();
- sizeValue.readFrom(in);
- return sizeValue;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- size = in.readVLong();
- sizeUnit = ByteSizeUnit.BYTES;
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVLong(bytes());
- }
-
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -246,13 +193,13 @@ public class ByteSizeValue implements Streamable {
ByteSizeValue sizeValue = (ByteSizeValue) o;
- return bytes() == sizeValue.bytes();
+ return getBytes() == sizeValue.getBytes();
}
@Override
public int hashCode() {
int result = Long.hashCode(size);
- result = 31 * result + (sizeUnit != null ? sizeUnit.hashCode() : 0);
+ result = 31 * result + (unit != null ? unit.hashCode() : 0);
return result;
}
}
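ByteSizeValue moves from Streamable to Writeable: the fields become final, deserialization happens in a constructor taking a StreamInput, and the value is always normalized to bytes on the wire, so the original unit is not preserved. SizeValue below gets the same treatment. A minimal round-trip sketch:

    ByteSizeValue value = new ByteSizeValue(2, ByteSizeUnit.KB);
    BytesStreamOutput out = new BytesStreamOutput();
    value.writeTo(out);                                 // serialized as 2048 bytes
    ByteSizeValue read = new ByteSizeValue(out.bytes().streamInput());
    assert read.getBytes() == value.getBytes();         // equals() compares getBytes(), not the unit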
diff --git a/core/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java b/core/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java
index 6a99e06ac0..2830d8318a 100644
--- a/core/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java
+++ b/core/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java
@@ -42,7 +42,7 @@ public enum MemorySizeValue {
if (percent < 0 || percent > 100) {
throw new ElasticsearchParseException("percentage should be in [0-100], got [{}]", percentAsString);
}
- return new ByteSizeValue((long) ((percent / 100) * JvmInfo.jvmInfo().getMem().getHeapMax().bytes()), ByteSizeUnit.BYTES);
+ return new ByteSizeValue((long) ((percent / 100) * JvmInfo.jvmInfo().getMem().getHeapMax().getBytes()), ByteSizeUnit.BYTES);
} catch (NumberFormatException e) {
throw new ElasticsearchParseException("failed to parse [{}] as a double", e, percentAsString);
}
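The hunk above is only the bytes() to getBytes() rename, but it shows how heap-relative sizes are resolved: a percentage is applied to the JVM's max heap once, at parse time. A hedged usage sketch, assuming the enclosing method is this class's parseBytesSizeValueOrHeapRatio and a 4gb max heap:

    // "50%" resolves to a fixed 2gb ByteSizeValue; it does not track later heap changes
    ByteSizeValue limit = MemorySizeValue.parseBytesSizeValueOrHeapRatio("50%", "indices.breaker.total.limit");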
diff --git a/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java b/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java
index e04dfe5143..cba51f29ee 100644
--- a/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java
+++ b/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java
@@ -23,22 +23,14 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
-/**
- *
- */
-public class SizeValue implements Streamable {
-
- private long size;
-
- private SizeUnit sizeUnit;
+public class SizeValue implements Writeable {
- private SizeValue() {
-
- }
+ private final long size;
+ private final SizeUnit sizeUnit;
public SizeValue(long singles) {
this(singles, SizeUnit.SINGLE);
@@ -52,6 +44,16 @@ public class SizeValue implements Streamable {
this.sizeUnit = sizeUnit;
}
+ public SizeValue(StreamInput in) throws IOException {
+ size = in.readVLong();
+ sizeUnit = SizeUnit.SINGLE;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(singles());
+ }
+
public long singles() {
return sizeUnit.toSingles(size);
}
@@ -194,23 +196,6 @@ public class SizeValue implements Streamable {
return new SizeValue(singles, SizeUnit.SINGLE);
}
- public static SizeValue readSizeValue(StreamInput in) throws IOException {
- SizeValue sizeValue = new SizeValue();
- sizeValue.readFrom(in);
- return sizeValue;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- size = in.readVLong();
- sizeUnit = SizeUnit.SINGLE;
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVLong(singles());
- }
-
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
index ed67019c10..8f81efb649 100644
--- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
+++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
@@ -249,6 +249,12 @@ public class TimeValue implements Writeable {
return PeriodFormat.getDefault().withParseType(type).print(period);
}
+ /**
+ * Returns a {@link String} representation of the current {@link TimeValue}.
+ *
+     * Note that this method might produce fractional time values (e.g. 1.6m) which cannot be
+     * parsed by methods like {@link TimeValue#parse(String, String, int)}.
+ */
@Override
public String toString() {
if (duration < 0) {
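The new javadoc documents an asymmetry worth spelling out: toString() favors human readability over round-tripping, while getStringRep() (used by the setting defaults earlier in this diff) stays parseable. For example:

    TimeValue t = TimeValue.timeValueSeconds(96);
    t.toString();      // "1.6m" - fractional, rejected by TimeValue.parseTimeValue
    t.getStringRep();  // "96s"  - lossless representation suitable for setting defaults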
diff --git a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
index 221dc23451..2712aef823 100644
--- a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
+++ b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
@@ -19,9 +19,11 @@
package org.elasticsearch.common.util;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
@@ -41,7 +43,7 @@ import java.nio.file.StandardCopyOption;
public class IndexFolderUpgrader {
private final NodeEnvironment nodeEnv;
private final Settings settings;
- private final ESLogger logger = Loggers.getLogger(IndexFolderUpgrader.class);
+ private final Logger logger = Loggers.getLogger(IndexFolderUpgrader.class);
/**
* Creates a new upgrader instance
@@ -64,8 +66,8 @@ public class IndexFolderUpgrader {
} catch (NoSuchFileException | FileNotFoundException exception) {
// thrown when the source is non-existent because the folder was renamed
// by another node (shared FS) after we checked if the target exists
- logger.error("multiple nodes trying to upgrade [{}] in parallel, retry upgrading with single node",
- exception, target);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " +
+ "upgrading with single node", target), exception);
throw exception;
} finally {
if (success) {
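This hunk is an instance of the mechanical ESLogger-to-log4j2 migration that recurs throughout this merge: the old API took the exception between the format string and its arguments, while log4j2 takes a lazily evaluated message with the Throwable last. The pattern, side by side:

    // before (ESLogger): exception passed after the format string
    logger.error("multiple nodes trying to upgrade [{}] in parallel", exception, target);

    // after (log4j2): lazy ParameterizedMessage, Throwable as the final argument
    logger.error((Supplier<?>) () -> new ParameterizedMessage(
            "multiple nodes trying to upgrade [{}] in parallel", target), exception);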
diff --git a/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java
index eed357dee7..947aad4873 100644
--- a/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java
+++ b/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java
@@ -20,7 +20,6 @@
package org.elasticsearch.common.util;
import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
@@ -29,7 +28,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import java.util.Arrays;
@@ -46,7 +44,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
public static final Setting<Type> TYPE_SETTING =
new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope);
public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING =
- Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope);
+ Setting.memorySizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope);
public static final Setting<Double> WEIGHT_BYTES_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope);
public static final Setting<Double> WEIGHT_LONG_SETTING =
@@ -70,7 +68,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
protected PageCacheRecycler(Settings settings) {
super(settings);
final Type type = TYPE_SETTING .get(settings);
- final long limit = LIMIT_HEAP_SETTING .get(settings).bytes();
+ final long limit = LIMIT_HEAP_SETTING .get(settings).getBytes();
final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
// We have a global amount of memory that we need to divide across data types.
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java
index 30d7e63ec8..e7a38f1eb6 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java
@@ -18,8 +18,8 @@
*/
package org.elasticsearch.common.util.concurrent;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.component.Lifecycle;
-import org.elasticsearch.common.logging.ESLogger;
import java.util.Objects;
@@ -36,7 +36,7 @@ public abstract class AbstractLifecycleRunnable extends AbstractRunnable {
/**
* The service's logger (note: this is passed in!).
*/
- private final ESLogger logger;
+ private final Logger logger;
/**
* {@link AbstractLifecycleRunnable} must be aware of the actual {@code lifecycle} to react properly.
@@ -45,7 +45,7 @@ public abstract class AbstractLifecycleRunnable extends AbstractRunnable {
* @param logger The logger to use when logging
* @throws NullPointerException if any parameter is {@code null}
*/
- public AbstractLifecycleRunnable(Lifecycle lifecycle, ESLogger logger) {
+ public AbstractLifecycleRunnable(Lifecycle lifecycle, Logger logger) {
this.lifecycle = Objects.requireNonNull(lifecycle, "lifecycle must not be null");
this.logger = Objects.requireNonNull(logger, "logger must not be null");
}
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java
new file mode 100644
index 0000000000..ad68471041
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.collect.Tuple;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.Semaphore;
+import java.util.function.Consumer;
+
+/**
+ * This async IO processor allows batching IO operations and having a single writer process the write operations.
+ * This can be used to ensure that threads can continue with other work while the actual IO operation is still processed
+ * by a single worker. A worker in this context can be any caller of the {@link #put(Object, Consumer)} method since it will
+ * hijack a worker if nobody else is currently processing queued items. If the internal queue has reached its capacity, incoming threads
+ * might be blocked until other items are processed.
+ */
+public abstract class AsyncIOProcessor<Item> {
+ private final Logger logger;
+ private final ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> queue;
+ private final Semaphore promiseSemaphore = new Semaphore(1);
+
+ protected AsyncIOProcessor(Logger logger, int queueSize) {
+ this.logger = logger;
+ this.queue = new ArrayBlockingQueue<>(queueSize);
+ }
+
+ /**
+ * Adds the given item to the queue. The listener is notified once the item is processed
+ */
+ public final void put(Item item, Consumer<Exception> listener) {
+ Objects.requireNonNull(item, "item must not be null");
+ Objects.requireNonNull(listener, "listener must not be null");
+        // the algorithm here tries to reduce the load on each individual caller.
+        // we try to have only one caller that processes pending items to disk while others just add to the queue but
+ // at the same time never overload the node by pushing too many items into the queue.
+
+        // we first try to make a promise that we are responsible for the processing
+ final boolean promised = promiseSemaphore.tryAcquire();
+ final Tuple<Item, Consumer<Exception>> itemTuple = new Tuple<>(item, listener);
+ if (promised == false) {
+ // in this case we are not responsible and can just block until there is space
+ try {
+ queue.put(new Tuple<>(item, listener));
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ listener.accept(e);
+ }
+ }
+
+        // here we have to try to make the promise again, otherwise there is a race when a thread puts an entry without making the promise
+        // while we are draining; that means we might exit the while loop below too early if the drainAndProcess call is fast.
+ if (promised || promiseSemaphore.tryAcquire()) {
+ final List<Tuple<Item, Consumer<Exception>>> candidates = new ArrayList<>();
+ try {
+ if (promised) {
+                    // we are responsible for processing; we don't need to add the tuple to the queue, we can just add it to the candidates
+ candidates.add(itemTuple);
+ }
+                // since we made the promise to process, we have to do it here at least once
+ drainAndProcess(candidates);
+ } finally {
+                promiseSemaphore.release(); // to make sure we pass the promise on, release it so another thread can take over
+ }
+ while (queue.isEmpty() == false && promiseSemaphore.tryAcquire()) {
+                // if the queue is not empty and nobody else has made the promise to take over, we continue processing
+ try {
+ drainAndProcess(candidates);
+ } finally {
+ promiseSemaphore.release();
+ }
+ }
+ }
+ }
+
+ private void drainAndProcess(List<Tuple<Item, Consumer<Exception>>> candidates) {
+ queue.drainTo(candidates);
+ processList(candidates);
+ candidates.clear();
+ }
+
+ private void processList(List<Tuple<Item, Consumer<Exception>>> candidates) {
+ Exception exception = null;
+ if (candidates.isEmpty() == false) {
+ try {
+ write(candidates);
+ } catch (Exception ex) { // if this fails we are in deep shit - fail the request
+ logger.debug("failed to write candidates", ex);
+ // this exception is passed to all listeners - we don't retry. if this doesn't work we are in deep shit
+ exception = ex;
+ }
+ }
+ for (Tuple<Item, Consumer<Exception>> tuple : candidates) {
+ Consumer<Exception> consumer = tuple.v2();
+ try {
+ consumer.accept(exception);
+ } catch (Exception ex) {
+ logger.warn("failed to notify callback", ex);
+ }
+ }
+ }
+
+ /**
+     * Writes the items out or otherwise processes them, for example by persisting them to disk.
+ */
+ protected abstract void write(List<Tuple<Item, Consumer<Exception>>> candidates) throws IOException;
+}
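A hedged sketch of how a concrete AsyncIOProcessor is used; the item type and the appendToFile helper are illustrative, not part of this patch:

    AsyncIOProcessor<String> processor = new AsyncIOProcessor<String>(logger, 1024) {
        @Override
        protected void write(List<Tuple<String, Consumer<Exception>>> candidates) throws IOException {
            for (Tuple<String, Consumer<Exception>> candidate : candidates) {
                appendToFile(candidate.v1()); // hypothetical batched IO, e.g. a translog-style append
            }
        }
    };
    // callers block at most on queue capacity; one of them becomes the writer and drains the whole batch
    processor.put("payload", failure -> { if (failure != null) { /* fail the originating request */ } });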
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
index 878645eddf..825d18b7e6 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
@@ -19,17 +19,16 @@
package org.elasticsearch.common.util.concurrent;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
-/**
- */
public class LoggingRunnable implements Runnable {
private final Runnable runnable;
+ private final Logger logger;
- private final ESLogger logger;
-
- public LoggingRunnable(ESLogger logger, Runnable runnable) {
+ public LoggingRunnable(Logger logger, Runnable runnable) {
this.runnable = runnable;
this.logger = logger;
}
@@ -39,7 +38,8 @@ public class LoggingRunnable implements Runnable {
try {
runnable.run();
} catch (Exception e) {
- logger.warn("failed to execute [{}]", e, runnable.toString());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
}
}
+
}
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
index bf1ef6a563..8c04c24ec5 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
@@ -269,7 +269,7 @@ public final class ThreadContext implements Closeable, Writeable {
}
this.requestHeaders = requestHeaders;
- this.responseHeaders = in.readMapOfLists();
+ this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
this.transientHeaders = Collections.emptyMap();
}
@@ -370,7 +370,7 @@ public final class ThreadContext implements Closeable, Writeable {
out.writeString(entry.getValue());
}
- out.writeMapOfLists(responseHeaders);
+ out.writeMapOfLists(responseHeaders, StreamOutput::writeString, StreamOutput::writeString);
}
}
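readMapOfLists and writeMapOfLists now take explicit per-element readers and writers instead of assuming string keys and values. A minimal round-trip sketch using the same method references as the hunk above:

    BytesStreamOutput out = new BytesStreamOutput();
    Map<String, List<String>> headers = Collections.singletonMap("warnings", Arrays.asList("a", "b"));
    out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
    Map<String, List<String>> read =
        out.bytes().streamInput().readMapOfLists(StreamInput::readString, StreamInput::readString);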
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
index 6f8a606d9a..a623a86c9b 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
@@ -60,6 +60,9 @@ public abstract class AbstractObjectParser<Value, Context extends ParseFieldMatc
ValueType type);
public <T> void declareField(BiConsumer<Value, T> consumer, NoContextParser<T> parser, ParseField parseField, ValueType type) {
+ if (parser == null) {
+ throw new IllegalArgumentException("[parser] is required");
+ }
declareField(consumer, (p, c) -> parser.parse(p), parseField, type);
}
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java
index e1400463a7..b8a42cd1e1 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java
@@ -103,7 +103,7 @@ public final class ConstructingObjectParser<Value, Context extends ParseFieldMat
/**
* Build the parser.
- *
+ *
* @param name The name given to the delegate ObjectParser for error identification. Use what you'd use if the object worked with
* ObjectParser.
* @param builder A function that builds the object from an array of Objects. Declare this inline with the parser, casting the elements
@@ -113,7 +113,24 @@ public final class ConstructingObjectParser<Value, Context extends ParseFieldMat
* allocations.
*/
public ConstructingObjectParser(String name, Function<Object[], Value> builder) {
- objectParser = new ObjectParser<>(name);
+ this(name, false, builder);
+ }
+
+ /**
+ * Build the parser.
+ *
+ * @param name The name given to the delegate ObjectParser for error identification. Use what you'd use if the object worked with
+ * ObjectParser.
+ * @param ignoreUnknownFields Should this parser ignore unknown fields? This should generally be set to true only when parsing responses
+ * from external systems, never when parsing requests from users.
+ * @param builder A function that builds the object from an array of Objects. Declare this inline with the parser, casting the elements
+ * of the array to the arguments so they work with your favorite constructor. The objects in the array will be in the same order
+ * that you declared the {{@link #constructorArg()}s and none will be null. If any of the constructor arguments aren't defined in
+ * the XContent then parsing will throw an error. We use an array here rather than a {@code Map<String, Object>} to save on
+ * allocations.
+ */
+ public ConstructingObjectParser(String name, boolean ignoreUnknownFields, Function<Object[], Value> builder) {
+ objectParser = new ObjectParser<>(name, ignoreUnknownFields, null);
this.builder = builder;
}
@@ -153,6 +170,19 @@ public final class ConstructingObjectParser<Value, Context extends ParseFieldMat
@Override
public <T> void declareField(BiConsumer<Value, T> consumer, ContextParser<Context, T> parser, ParseField parseField, ValueType type) {
+ if (consumer == null) {
+ throw new IllegalArgumentException("[consumer] is required");
+ }
+ if (parser == null) {
+ throw new IllegalArgumentException("[parser] is required");
+ }
+ if (parseField == null) {
+ throw new IllegalArgumentException("[parseField] is required");
+ }
+ if (type == null) {
+ throw new IllegalArgumentException("[type] is required");
+ }
+
if (consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER || consumer == OPTIONAL_CONSTRUCTOR_ARG_MARKER) {
/*
* Constructor arguments are detected by this "marker" consumer. It keeps the API looking clean even if it is a bit sleezy. We
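A hedged sketch of the new lenient construction path; the Item class, its constructor, and the field name are illustrative:

    static final ConstructingObjectParser<Item, ParseFieldMatcherSupplier> PARSER =
        new ConstructingObjectParser<>("item", true /* ignoreUnknownFields: only for external responses */,
                args -> new Item((String) args[0]));
    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name"));
    }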
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java
index 44d9e6e199..2abd6e66df 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.common.xcontent;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
@@ -83,6 +84,11 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
private final Map<String, FieldParser> fieldParserMap = new HashMap<>();
private final String name;
private final Supplier<Value> valueSupplier;
+ /**
+ * Should this parser ignore unknown fields? This should generally be set to true only when parsing responses from external systems,
+ * never when parsing requests from users.
+ */
+ private final boolean ignoreUnknownFields;
/**
* Creates a new ObjectParser instance with a name. This name is used to reference the parser in exceptions and messages.
@@ -96,9 +102,21 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
* @param name the parsers name, used to reference the parser in exceptions and messages.
* @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser.
*/
- public ObjectParser(String name, Supplier<Value> valueSupplier) {
+ public ObjectParser(String name, @Nullable Supplier<Value> valueSupplier) {
+ this(name, false, valueSupplier);
+ }
+
+ /**
+     * Creates a new ObjectParser instance with a name.
+ * @param name the parsers name, used to reference the parser in exceptions and messages.
+ * @param ignoreUnknownFields Should this parser ignore unknown fields? This should generally be set to true only when parsing
+ * responses from external systems, never when parsing requests from users.
+ * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser.
+ */
+ public ObjectParser(String name, boolean ignoreUnknownFields, @Nullable Supplier<Value> valueSupplier) {
this.name = name;
this.valueSupplier = valueSupplier;
+ this.ignoreUnknownFields = ignoreUnknownFields;
}
/**
@@ -144,9 +162,13 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
if (currentFieldName == null) {
throw new IllegalStateException("[" + name + "] no field found");
}
- assert fieldParser != null;
- fieldParser.assertSupports(name, token, currentFieldName, context.getParseFieldMatcher());
- parseSub(parser, fieldParser, currentFieldName, value, context);
+ if (fieldParser == null) {
+                    assert ignoreUnknownFields : "this should only be possible if configured to ignore unknown fields";
+ parser.skipChildren(); // noop if parser points to a value, skips children if parser is start object or start array
+ } else {
+ fieldParser.assertSupports(name, token, currentFieldName, context.getParseFieldMatcher());
+ parseSub(parser, fieldParser, currentFieldName, value, context);
+ }
fieldParser = null;
}
}
@@ -169,6 +191,12 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
void parse(XContentParser parser, Value value, Context context) throws IOException;
}
public void declareField(Parser<Value, Context> p, ParseField parseField, ValueType type) {
+ if (parseField == null) {
+ throw new IllegalArgumentException("[parseField] is required");
+ }
+ if (type == null) {
+ throw new IllegalArgumentException("[type] is required");
+ }
FieldParser fieldParser = new FieldParser(p, type.supportedTokens(), parseField, type);
for (String fieldValue : parseField.getAllNamesIncludedDeprecated()) {
fieldParserMap.putIfAbsent(fieldValue, fieldParser);
@@ -178,6 +206,12 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
@Override
public <T> void declareField(BiConsumer<Value, T> consumer, ContextParser<Context, T> parser, ParseField parseField,
ValueType type) {
+ if (consumer == null) {
+ throw new IllegalArgumentException("[consumer] is required");
+ }
+ if (parser == null) {
+ throw new IllegalArgumentException("[parser] is required");
+ }
declareField((p, v, c) -> consumer.accept(v, parser.parse(p, c)), parseField, type);
}
@@ -362,7 +396,7 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
private FieldParser getParser(String fieldName) {
FieldParser<Value> parser = fieldParserMap.get(fieldName);
- if (parser == null) {
+ if (parser == null && false == ignoreUnknownFields) {
throw new IllegalArgumentException("[" + name + "] unknown field [" + fieldName + "], parser not found");
}
return parser;
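With ignoreUnknownFields set, getParser returning null is no longer an error: parse() skips over the unknown field's children instead of throwing. A hedged sketch (Item and setName are illustrative):

    ObjectParser<Item, ParseFieldMatcherSupplier> parser = new ObjectParser<>("item", true, Item::new);
    parser.declareString(Item::setName, new ParseField("name"));
    // input such as {"name":"a","unexpected":{"x":1}} now parses; the "unexpected" object is skipped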
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java
index 35579965f3..c73f5f19d2 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java
@@ -25,6 +25,8 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
+import java.util.Collections;
+import java.util.Set;
/**
* A generic abstraction on top of handling content, inspired by JSON and pull parsing.
@@ -42,27 +44,20 @@ public interface XContent {
* Creates a new generator using the provided output stream.
*/
default XContentGenerator createGenerator(OutputStream os) throws IOException {
- return createGenerator(os, null, true);
+ return createGenerator(os, Collections.emptySet(), Collections.emptySet());
}
/**
- * Creates a new generator using the provided output stream and some
- * inclusive filters. Same as createGenerator(os, filters, true).
- */
- default XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
- return createGenerator(os, filters, true);
- }
-
- /**
- * Creates a new generator using the provided output stream and some
- * filters.
+ * Creates a new generator using the provided output stream and some inclusive and/or exclusive filters. When both exclusive and
+ * inclusive filters are provided, the underlying generator will first use exclusion filters to remove fields and then will check the
+ * remaining fields against the inclusive filters.
*
- * @param inclusive
- * If true only paths matching a filter will be included in
- * output. If false no path matching a filter will be included in
- * output
+ * @param os the output stream
+ * @param includes the inclusive filters: only fields and objects that match the inclusive filters will be written to the output.
+ * @param excludes the exclusive filters: only fields and objects that don't match the exclusive filters will be written to the output.
*/
- XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException;
+ XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException;
+
/**
* Creates a parser over the provided string content.
*/
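The String[]-plus-boolean filtering API is replaced by separate include and exclude sets, with excludes applied before includes. A hedged sketch against the JSON implementation (the filter paths are illustrative):

    Set<String> includes = Collections.singleton("user.*");
    Set<String> excludes = Collections.singleton("user.password");
    XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent, includes, excludes);
    builder.startObject()
           .startObject("user")
               .field("name", "kimchy")
               .field("password", "secret")   // removed by the exclude filter
           .endObject()
           .endObject();                      // yields {"user":{"name":"kimchy"}}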
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
index 1d8ce366b9..c416aeffe3 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
@@ -19,20 +19,6 @@
package org.elasticsearch.common.xcontent;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.math.BigDecimal;
-import java.math.RoundingMode;
-import java.nio.file.Path;
-import java.util.Calendar;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
@@ -47,59 +33,137 @@ import org.joda.time.ReadableInstant;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;
+import java.io.Flushable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Path;
+import java.util.Calendar;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
/**
* A utility to build XContent (ie json).
*/
-public final class XContentBuilder implements BytesStream, Releasable {
-
- public static final DateTimeFormatter defaultDatePrinter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
+public final class XContentBuilder implements BytesStream, Releasable, Flushable {
+ /**
+ * Create a new {@link XContentBuilder} using the given {@link XContent} content.
+ * <p>
+ * The builder uses an internal {@link BytesStreamOutput} output stream to build the content.
+ * </p>
+ *
+ * @param xContent the {@link XContent}
+ * @return a new {@link XContentBuilder}
+ * @throws IOException if an {@link IOException} occurs while building the content
+ */
public static XContentBuilder builder(XContent xContent) throws IOException {
return new XContentBuilder(xContent, new BytesStreamOutput());
}
- public static XContentBuilder builder(XContent xContent, String[] filters) throws IOException {
- return new XContentBuilder(xContent, new BytesStreamOutput(), filters);
+ /**
+ * Create a new {@link XContentBuilder} using the given {@link XContent} content and some inclusive and/or exclusive filters.
+ * <p>
+ * The builder uses an internal {@link BytesStreamOutput} output stream to build the content. When both exclusive and
+ * inclusive filters are provided, the underlying builder will first use exclusion filters to remove fields and then will check the
+ * remaining fields against the inclusive filters.
+ * <p>
+ *
+ * @param xContent the {@link XContent}
+ * @param includes the inclusive filters: only fields and objects that match the inclusive filters will be written to the output.
+ * @param excludes the exclusive filters: only fields and objects that don't match the exclusive filters will be written to the output.
+ * @throws IOException if an {@link IOException} occurs while building the content
+ */
+ public static XContentBuilder builder(XContent xContent, Set<String> includes, Set<String> excludes) throws IOException {
+ return new XContentBuilder(xContent, new BytesStreamOutput(), includes, excludes);
+ }
+
+ public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
+
+ private static final Map<Class<?>, Writer> WRITERS;
+ static {
+ Map<Class<?>, Writer> writers = new HashMap<>();
+ writers.put(Boolean.class, (b, v) -> b.value((Boolean) v));
+ writers.put(Byte.class, (b, v) -> b.value((Byte) v));
+ writers.put(byte[].class, (b, v) -> b.value((byte[]) v));
+ writers.put(BytesRef.class, (b, v) -> b.binaryValue((BytesRef) v));
+ writers.put(Date.class, (b, v) -> b.value((Date) v));
+ writers.put(Double.class, (b, v) -> b.value((Double) v));
+ writers.put(double[].class, (b, v) -> b.values((double[]) v));
+ writers.put(Float.class, (b, v) -> b.value((Float) v));
+ writers.put(float[].class, (b, v) -> b.values((float[]) v));
+ writers.put(GeoPoint.class, (b, v) -> b.value((GeoPoint) v));
+ writers.put(Integer.class, (b, v) -> b.value((Integer) v));
+ writers.put(int[].class, (b, v) -> b.values((int[]) v));
+ writers.put(Long.class, (b, v) -> b.value((Long) v));
+ writers.put(long[].class, (b, v) -> b.values((long[]) v));
+ writers.put(Short.class, (b, v) -> b.value((Short) v));
+ writers.put(short[].class, (b, v) -> b.values((short[]) v));
+ writers.put(String.class, (b, v) -> b.value((String) v));
+ writers.put(String[].class, (b, v) -> b.values((String[]) v));
+ writers.put(Text.class, (b, v) -> b.value((Text) v));
+
+ WRITERS = Collections.unmodifiableMap(writers);
}
- public static XContentBuilder builder(XContent xContent, String[] filters, boolean inclusive) throws IOException {
- return new XContentBuilder(xContent, new BytesStreamOutput(), filters, inclusive);
+ @FunctionalInterface
+ private interface Writer {
+ void write(XContentBuilder builder, Object value) throws IOException;
}
- private XContentGenerator generator;
+ /**
+ * XContentGenerator used to build the XContent object
+ */
+ private final XContentGenerator generator;
+ /**
+ * Output stream to which the built object is written
+ */
private final OutputStream bos;
+ /**
+ * When this flag is set to true, some types of values are written in a format easier to read for a human.
+ */
private boolean humanReadable = false;
/**
- * Constructs a new builder using the provided xcontent and an OutputStream. Make sure
+ * Constructs a new builder using the provided XContent and an OutputStream. Make sure
* to call {@link #close()} when the builder is done with.
*/
public XContentBuilder(XContent xContent, OutputStream bos) throws IOException {
- this(xContent, bos, null);
+ this(xContent, bos, Collections.emptySet(), Collections.emptySet());
}
/**
- * Constructs a new builder using the provided xcontent, an OutputStream and
+ * Constructs a new builder using the provided XContent, an OutputStream and
* some filters. If filters are specified, only those values matching a
* filter will be written to the output stream. Make sure to call
* {@link #close()} when the builder is done with.
*/
- public XContentBuilder(XContent xContent, OutputStream bos, String[] filters) throws IOException {
- this(xContent, bos, filters, true);
+ public XContentBuilder(XContent xContent, OutputStream bos, Set<String> includes) throws IOException {
+ this(xContent, bos, includes, Collections.emptySet());
}
/**
- * Constructs a new builder using the provided xcontent, an OutputStream and
- * some filters. If {@code filters} are specified and {@code inclusive} is
- * true, only those values matching a filter will be written to the output
- * stream. If {@code inclusive} is false, those matching will be excluded.
+ * Creates a new builder using the provided XContent, output stream and some inclusive and/or exclusive filters. When both exclusive and
+ * inclusive filters are provided, the underlying builder will first use exclusion filters to remove fields and then will check the
+ * remaining fields against the inclusive filters.
+ * <p>
* Make sure to call {@link #close()} when the builder is done with.
+ *
+ * @param os the output stream
+ * @param includes the inclusive filters: only fields and objects that match the inclusive filters will be written to the output.
+ * @param excludes the exclusive filters: only fields and objects that don't match the exclusive filters will be written to the output.
*/
- public XContentBuilder(XContent xContent, OutputStream bos, String[] filters, boolean inclusive) throws IOException {
- this.bos = bos;
- this.generator = xContent.createGenerator(bos, filters, inclusive);
+ public XContentBuilder(XContent xContent, OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
+ this.bos = os;
+ this.generator = xContent.createGenerator(bos, includes, excludes);
}
public XContentType contentType() {
@@ -115,41 +179,45 @@ public final class XContentBuilder implements BytesStream, Releasable {
return generator.isPrettyPrint();
}
+ /**
+ * Indicate that the current {@link XContentBuilder} must write a line feed ("\n")
+ * at the end of the built object.
+ * <p>
+ * This only applies for JSON XContent type. It has no effect for other types.
+ */
public XContentBuilder lfAtEnd() {
generator.usePrintLineFeedAtEnd();
return this;
}
+ /**
+ * Set the "human readable" flag. Once set, some types of values are written in a
+ * format easier to read for a human.
+ */
public XContentBuilder humanReadable(boolean humanReadable) {
this.humanReadable = humanReadable;
return this;
}
+ /**
+ * @return the value of the "human readable" flag. When the value is equal to true,
+ * some types of values are written in a format easier to read for a human.
+ */
public boolean humanReadable() {
return this.humanReadable;
}
- public XContentBuilder field(String name, ToXContent xContent) throws IOException {
- field(name);
- xContent.toXContent(this, ToXContent.EMPTY_PARAMS);
- return this;
- }
+ ////////////////////////////////////////////////////////////////////////////
+ // Structure (object, array, field, null values...)
+ //////////////////////////////////
- public XContentBuilder field(String name, ToXContent xContent, ToXContent.Params params) throws IOException {
- field(name);
- xContent.toXContent(this, params);
+ public XContentBuilder startObject() throws IOException {
+ generator.writeStartObject();
return this;
}
public XContentBuilder startObject(String name) throws IOException {
- field(name);
- startObject();
- return this;
- }
-
- public XContentBuilder startObject() throws IOException {
- generator.writeStartObject();
- return this;
+ return field(name).startObject();
}
public XContentBuilder endObject() throws IOException {
@@ -157,33 +225,13 @@ public final class XContentBuilder implements BytesStream, Releasable {
return this;
}
- public XContentBuilder array(String name, String... values) throws IOException {
- startArray(name);
- for (String value : values) {
- value(value);
- }
- endArray();
- return this;
- }
-
- public XContentBuilder array(String name, Object... values) throws IOException {
- startArray(name);
- for (Object value : values) {
- value(value);
- }
- endArray();
+ public XContentBuilder startArray() throws IOException {
+ generator.writeStartArray();
return this;
}
public XContentBuilder startArray(String name) throws IOException {
- field(name);
- startArray();
- return this;
- }
-
- public XContentBuilder startArray() throws IOException {
- generator.writeStartArray();
- return this;
+ return field(name).startArray();
}
public XContentBuilder endArray() throws IOException {
@@ -192,563 +240,727 @@ public final class XContentBuilder implements BytesStream, Releasable {
}
public XContentBuilder field(String name) throws IOException {
- if (name == null) {
- throw new IllegalArgumentException("field name cannot be null");
- }
+ ensureNameNotNull(name);
generator.writeFieldName(name);
return this;
}
- public XContentBuilder field(String name, char[] value, int offset, int length) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- generator.writeString(value, offset, length);
- }
+ public XContentBuilder nullField(String name) throws IOException {
+ ensureNameNotNull(name);
+ generator.writeNullField(name);
return this;
}
- public XContentBuilder field(String name, String value) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- generator.writeString(value);
- }
+ public XContentBuilder nullValue() throws IOException {
+ generator.writeNull();
return this;
}
- public XContentBuilder field(String name, Integer value) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- generator.writeNumber(value.intValue());
- }
- return this;
+ ////////////////////////////////////////////////////////////////////////////
+ // Boolean
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, Boolean value) throws IOException {
+ return (value == null) ? nullField(name) : field(name, value.booleanValue());
}
- public XContentBuilder field(String name, int value) throws IOException {
- field(name);
- generator.writeNumber(value);
+ public XContentBuilder field(String name, boolean value) throws IOException {
+ ensureNameNotNull(name);
+ generator.writeBooleanField(name, value);
return this;
}
- public XContentBuilder field(String name, Long value) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- generator.writeNumber(value.longValue());
+ public XContentBuilder array(String name, boolean[] values) throws IOException {
+ return field(name).values(values);
+ }
+
+ private XContentBuilder values(boolean[] values) throws IOException {
+ if (values == null) {
+ return nullValue();
}
+ startArray();
+ for (boolean b : values) {
+ value(b);
+ }
+ endArray();
return this;
}
- public XContentBuilder field(String name, long value) throws IOException {
- field(name);
- generator.writeNumber(value);
- return this;
+ public XContentBuilder value(Boolean value) throws IOException {
+ return (value == null) ? nullValue() : value(value.booleanValue());
}
- public XContentBuilder field(String name, Float value) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- generator.writeNumber(value.floatValue());
- }
+ public XContentBuilder value(boolean value) throws IOException {
+ generator.writeBoolean(value);
return this;
}
- public XContentBuilder field(String name, float value) throws IOException {
- field(name);
+ ////////////////////////////////////////////////////////////////////////////
+ // Byte
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, Byte value) throws IOException {
+ return (value == null) ? nullField(name) : field(name, value.byteValue());
+ }
+
+ public XContentBuilder field(String name, byte value) throws IOException {
+ return field(name).value(value);
+ }
+
+ public XContentBuilder value(Byte value) throws IOException {
+ return (value == null) ? nullValue() : value(value.byteValue());
+ }
+
+ public XContentBuilder value(byte value) throws IOException {
generator.writeNumber(value);
return this;
}
+ ////////////////////////////////////////////////////////////////////////////
+ // Double
+ //////////////////////////////////
+
public XContentBuilder field(String name, Double value) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- generator.writeNumber(value);
- }
- return this;
+ return (value == null) ? nullField(name) : field(name, value.doubleValue());
}
public XContentBuilder field(String name, double value) throws IOException {
- field(name);
- generator.writeNumber(value);
+ ensureNameNotNull(name);
+ generator.writeNumberField(name, value);
return this;
}
- public XContentBuilder field(String name, BigDecimal value) throws IOException {
- return field(name, value, value.scale(), RoundingMode.HALF_UP, true);
+ public XContentBuilder array(String name, double[] values) throws IOException {
+ return field(name).values(values);
}
- public XContentBuilder field(String name, BigDecimal value, int scale, RoundingMode rounding, boolean toDouble) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- if (toDouble) {
- try {
- generator.writeNumber(value.setScale(scale, rounding).doubleValue());
- } catch (ArithmeticException e) {
- generator.writeString(value.toEngineeringString());
- }
- } else {
- generator.writeString(value.toEngineeringString());
- }
+ private XContentBuilder values(double[] values) throws IOException {
+ if (values == null) {
+ return nullValue();
+ }
+ startArray();
+ for (double b : values) {
+ value(b);
}
+ endArray();
return this;
}
- /**
- * Writes the binary content of the given BytesRef
- * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
- */
- public XContentBuilder field(String name, BytesRef value) throws IOException {
- field(name);
- generator.writeBinary(value.bytes, value.offset, value.length);
- return this;
+ public XContentBuilder value(Double value) throws IOException {
+ return (value == null) ? nullValue() : value(value.doubleValue());
}
- /**
- * Writes the binary content of the given BytesReference
- * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
- */
- public XContentBuilder field(String name, BytesReference value) throws IOException {
- field(name);
- final BytesRef ref = value.toBytesRef();
- generator.writeBinary(ref.bytes, ref.offset, ref.length);
+ public XContentBuilder value(double value) throws IOException {
+ generator.writeNumber(value);
return this;
}
- /**
- * Writes the binary content of the given BytesRef as UTF-8 bytes
- * Use {@link XContentParser#utf8Bytes()} to read the value back
- */
- public XContentBuilder utf8Field(String name, BytesRef value) throws IOException {
- field(name);
- generator.writeUTF8String(value.bytes, value.offset, value.length);
- return this;
- }
+ ////////////////////////////////////////////////////////////////////////////
+ // Float
+ //////////////////////////////////
- public XContentBuilder field(String name, Text value) throws IOException {
- field(name);
- if (value.hasString()) {
- generator.writeString(value.string());
- } else {
- // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a request to jackson to support InputStream as well?
- final BytesRef ref = value.bytes().toBytesRef();
- generator.writeUTF8String(ref.bytes, ref.offset, ref.length);
- }
- return this;
+ public XContentBuilder field(String name, Float value) throws IOException {
+ return (value == null) ? nullField(name) : field(name, value.floatValue());
}
- public XContentBuilder field(String name, byte[] value, int offset, int length) throws IOException {
- field(name);
- generator.writeBinary(value, offset, length);
+ public XContentBuilder field(String name, float value) throws IOException {
+ ensureNameNotNull(name);
+ generator.writeNumberField(name, value);
return this;
}
- public XContentBuilder field(String name, Map<String, Object> value) throws IOException {
- field(name);
- value(value);
- return this;
+ public XContentBuilder array(String name, float[] values) throws IOException {
+ return field(name).values(values);
}
- public XContentBuilder field(String name, Iterable<?> value) throws IOException {
- if (value instanceof Path) {
- //treat Paths as single value
- field(name);
- value(value);
- } else {
- startArray(name);
- for (Object o : value) {
- value(o);
- }
- endArray();
+ private XContentBuilder values(float[] values) throws IOException {
+ if (values == null) {
+ return nullValue();
}
- return this;
- }
-
- public XContentBuilder field(String name, boolean... value) throws IOException {
- startArray(name);
- for (boolean o : value) {
- value(o);
+ startArray();
+ for (float f : values) {
+ value(f);
}
endArray();
return this;
}
- public XContentBuilder field(String name, String... value) throws IOException {
- startArray(name);
- for (String o : value) {
- value(o);
- }
- endArray();
- return this;
+ public XContentBuilder value(Float value) throws IOException {
+ return (value == null) ? nullValue() : value(value.floatValue());
}
- public XContentBuilder field(String name, Object... value) throws IOException {
- startArray(name);
- for (Object o : value) {
- value(o);
- }
- endArray();
+ public XContentBuilder value(float value) throws IOException {
+ generator.writeNumber(value);
return this;
}
- public XContentBuilder field(String name, int... value) throws IOException {
- startArray(name);
- for (Object o : value) {
- value(o);
- }
- endArray();
- return this;
+ ////////////////////////////////////////////////////////////////////////////
+ // Integer
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, Integer value) throws IOException {
+ return (value == null) ? nullField(name) : field(name, value.intValue());
}
- public XContentBuilder field(String name, long... value) throws IOException {
- startArray(name);
- for (Object o : value) {
- value(o);
- }
- endArray();
+ public XContentBuilder field(String name, int value) throws IOException {
+ ensureNameNotNull(name);
+ generator.writeNumberField(name, value);
return this;
}
- public XContentBuilder field(String name, float... value) throws IOException {
- startArray(name);
- for (Object o : value) {
- value(o);
- }
- endArray();
- return this;
+ public XContentBuilder array(String name, int[] values) throws IOException {
+ return field(name).values(values);
}
- public XContentBuilder field(String name, double... value) throws IOException {
- startArray(name);
- for (Object o : value) {
- value(o);
+ private XContentBuilder values(int[] values) throws IOException {
+ if (values == null) {
+ return nullValue();
+ }
+ startArray();
+ for (int i : values) {
+ value(i);
}
endArray();
return this;
}
- public XContentBuilder field(String name, Object value) throws IOException {
- field(name);
- writeValue(value);
- return this;
+ public XContentBuilder value(Integer value) throws IOException {
+ return (value == null) ? nullValue() : value(value.intValue());
}
- public XContentBuilder value(Object value) throws IOException {
- writeValue(value);
+ public XContentBuilder value(int value) throws IOException {
+ generator.writeNumber(value);
return this;
}
- public XContentBuilder field(String name, boolean value) throws IOException {
- field(name);
- generator.writeBoolean(value);
+ ////////////////////////////////////////////////////////////////////////////
+ // Long
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, Long value) throws IOException {
+ return (value == null) ? nullField(name) : field(name, value.longValue());
+ }
+
+ public XContentBuilder field(String name, long value) throws IOException {
+ ensureNameNotNull(name);
+ generator.writeNumberField(name, value);
return this;
}
- public XContentBuilder field(String name, byte[] value) throws IOException {
- field(name);
- if (value == null) {
- generator.writeNull();
- } else {
- generator.writeBinary(value);
+ public XContentBuilder array(String name, long[] values) throws IOException {
+ return field(name).values(values);
+ }
+
+ private XContentBuilder values(long[] values) throws IOException {
+ if (values == null) {
+ return nullValue();
}
+ startArray();
+ for (long l : values) {
+ value(l);
+ }
+ endArray();
return this;
}
- public XContentBuilder field(String name, ReadableInstant date) throws IOException {
- field(name);
- return value(date);
+ public XContentBuilder value(Long value) throws IOException {
+ return (value == null) ? nullValue() : value(value.longValue());
}
- public XContentBuilder field(String name, ReadableInstant date, DateTimeFormatter formatter) throws IOException {
- field(name);
- return value(date, formatter);
+ public XContentBuilder value(long value) throws IOException {
+ generator.writeNumber(value);
+ return this;
}
- public XContentBuilder field(String name, Date date) throws IOException {
- field(name);
- return value(date);
- }
+ ////////////////////////////////////////////////////////////////////////////
+ // Short
+ //////////////////////////////////
- public XContentBuilder field(String name, Date date, DateTimeFormatter formatter) throws IOException {
- field(name);
- return value(date, formatter);
+ public XContentBuilder field(String name, Short value) throws IOException {
+ return (value == null) ? nullField(name) : field(name, value.shortValue());
}
- public XContentBuilder nullField(String name) throws IOException {
- generator.writeNullField(name);
- return this;
+ public XContentBuilder field(String name, short value) throws IOException {
+ return field(name).value(value);
}
- public XContentBuilder nullValue() throws IOException {
- generator.writeNull();
- return this;
+ public XContentBuilder array(String name, short[] values) throws IOException {
+ return field(name).values(values);
}
- public XContentBuilder rawField(String fieldName, InputStream content) throws IOException {
- generator.writeRawField(fieldName, content);
+ private XContentBuilder values(short[] values) throws IOException {
+ if (values == null) {
+ return nullValue();
+ }
+ startArray();
+ for (short s : values) {
+ value(s);
+ }
+ endArray();
return this;
}
- public XContentBuilder rawField(String fieldName, BytesReference content) throws IOException {
- generator.writeRawField(fieldName, content);
- return this;
+ public XContentBuilder value(Short value) throws IOException {
+ return (value == null) ? nullValue() : value(value.shortValue());
}
- public XContentBuilder rawValue(BytesReference content) throws IOException {
- generator.writeRawValue(content);
+ public XContentBuilder value(short value) throws IOException {
+ generator.writeNumber(value);
return this;
}
- public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, TimeValue timeValue) throws IOException {
- if (humanReadable) {
- field(readableFieldName, timeValue.toString());
+ ////////////////////////////////////////////////////////////////////////////
+ // String
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, String value) throws IOException {
+ if (value == null) {
+ return nullField(name);
}
- field(rawFieldName, timeValue.millis());
+ ensureNameNotNull(name);
+ generator.writeStringField(name, value);
return this;
}
- public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, long rawTime) throws IOException {
- if (humanReadable) {
- field(readableFieldName, new TimeValue(rawTime).toString());
+ public XContentBuilder array(String name, String... values) throws IOException {
+ return field(name).values(values);
+ }
+
+ private XContentBuilder values(String[] values) throws IOException {
+ if (values == null) {
+ return nullValue();
}
- field(rawFieldName, rawTime);
+ startArray();
+ for (String s : values) {
+ value(s);
+ }
+ endArray();
return this;
}
- public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, long rawTime, TimeUnit timeUnit) throws
- IOException {
- if (humanReadable) {
- field(readableFieldName, new TimeValue(rawTime, timeUnit).toString());
+ public XContentBuilder value(String value) throws IOException {
+ if (value == null) {
+ return nullValue();
}
- field(rawFieldName, rawTime);
+ generator.writeString(value);
return this;
}
- public XContentBuilder dateValueField(String rawFieldName, String readableFieldName, long rawTimestamp) throws IOException {
- if (humanReadable) {
- field(readableFieldName, defaultDatePrinter.print(rawTimestamp));
+ ////////////////////////////////////////////////////////////////////////////
+ // Binary
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, byte[] value) throws IOException {
+ if (value == null) {
+ return nullField(name);
}
- field(rawFieldName, rawTimestamp);
+ ensureNameNotNull(name);
+ generator.writeBinaryField(name, value);
return this;
}
- public XContentBuilder byteSizeField(String rawFieldName, String readableFieldName, ByteSizeValue byteSizeValue) throws IOException {
- if (humanReadable) {
- field(readableFieldName, byteSizeValue.toString());
+ public XContentBuilder value(byte[] value) throws IOException {
+ if (value == null) {
+ return nullValue();
}
- field(rawFieldName, byteSizeValue.bytes());
+ generator.writeBinary(value);
return this;
}
- public XContentBuilder byteSizeField(String rawFieldName, String readableFieldName, long rawSize) throws IOException {
- if (humanReadable) {
- field(readableFieldName, new ByteSizeValue(rawSize).toString());
+ public XContentBuilder field(String name, byte[] value, int offset, int length) throws IOException {
+ return field(name).value(value, offset, length);
+ }
+
+ public XContentBuilder value(byte[] value, int offset, int length) throws IOException {
+ if (value == null) {
+ return nullValue();
}
- field(rawFieldName, rawSize);
+ generator.writeBinary(value, offset, length);
return this;
}
- public XContentBuilder percentageField(String rawFieldName, String readableFieldName, double percentage) throws IOException {
- if (humanReadable) {
- field(readableFieldName, String.format(Locale.ROOT, "%1.1f%%", percentage));
+ /**
+ * Writes the binary content of the given {@link BytesRef}.
+ *
+ * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
+ */
+ public XContentBuilder field(String name, BytesRef value) throws IOException {
+ return field(name).binaryValue(value);
+ }
+
+ /**
+ * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes.
+ *
+ * Use {@link XContentParser#utf8Bytes()} to read the value back
+ */
+ public XContentBuilder utf8Field(String name, BytesRef value) throws IOException {
+ return field(name).utf8Value(value);
+ }
+
+ /**
+ * Writes the binary content of the given {@link BytesRef}.
+ *
+ * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
+ */
+ public XContentBuilder binaryValue(BytesRef value) throws IOException {
+ if (value == null) {
+ return nullValue();
}
- field(rawFieldName, percentage);
+ value(value.bytes, value.offset, value.length);
return this;
}
- public XContentBuilder value(Boolean value) throws IOException {
+ /**
+ * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes.
+ *
+ * Use {@link XContentParser#utf8Bytes()} to read the value back
+ */
+ public XContentBuilder utf8Value(BytesRef value) throws IOException {
if (value == null) {
return nullValue();
}
- return value(value.booleanValue());
+ generator.writeUTF8String(value.bytes, value.offset, value.length);
+ return this;
}
- public XContentBuilder value(boolean value) throws IOException {
- generator.writeBoolean(value);
- return this;
+ /**
+ * Writes the binary content of the given {@link BytesReference}.
+ *
+ * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
+ */
+ public XContentBuilder field(String name, BytesReference value) throws IOException {
+ return field(name).value(value);
}
- public XContentBuilder value(ReadableInstant date) throws IOException {
- return value(date, defaultDatePrinter);
+ /**
+ * Writes the binary content of the given {@link BytesReference}.
+ *
+ * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
+ */
+ public XContentBuilder value(BytesReference value) throws IOException {
+ return (value == null) ? nullValue() : binaryValue(value.toBytesRef());
}
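A quick round-trip sketch for the binary methods above, assuming XContentFactory.jsonBuilder() and the createParser(BytesReference) overload used elsewhere in this change (the field name is illustrative):

    XContentBuilder builder = XContentFactory.jsonBuilder();
    builder.startObject()
           .field("payload", new byte[]{1, 2, 3}) // writeBinaryField: Base64-encoded in JSON
           .endObject();

    try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes())) {
        parser.nextToken(); // START_OBJECT
        parser.nextToken(); // FIELD_NAME "payload"
        parser.nextToken(); // VALUE_STRING (Base64)
        byte[] roundTripped = parser.binaryValue(); // {1, 2, 3} again
    }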
- public XContentBuilder value(ReadableInstant date, DateTimeFormatter dateTimeFormatter) throws IOException {
- if (date == null) {
+ ////////////////////////////////////////////////////////////////////////////
+ // Text
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, Text value) throws IOException {
+ return field(name).value(value);
+ }
+
+ public XContentBuilder value(Text value) throws IOException {
+ if (value == null) {
return nullValue();
+ } else if (value.hasString()) {
+ return value(value.string());
+ } else {
+            // TODO: TextBytesOptimization: we could use a buffer here for the conversion,
+            // or ask Jackson to support InputStream as well
+ return utf8Value(value.bytes().toBytesRef());
}
- return value(dateTimeFormatter.print(date));
}
- public XContentBuilder value(Date date) throws IOException {
- return value(date, defaultDatePrinter);
+ ////////////////////////////////////////////////////////////////////////////
+ // Date
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, ReadableInstant value) throws IOException {
+ return field(name).value(value);
}
- public XContentBuilder value(Date date, DateTimeFormatter dateTimeFormatter) throws IOException {
- if (date == null) {
- return nullValue();
- }
- return value(dateTimeFormatter.print(date.getTime()));
+ public XContentBuilder field(String name, ReadableInstant value, DateTimeFormatter formatter) throws IOException {
+ return field(name).value(value, formatter);
}
- public XContentBuilder value(Integer value) throws IOException {
+ public XContentBuilder value(ReadableInstant value) throws IOException {
+ return value(value, DEFAULT_DATE_PRINTER);
+ }
+
+ public XContentBuilder value(ReadableInstant value, DateTimeFormatter formatter) throws IOException {
if (value == null) {
return nullValue();
}
- return value(value.intValue());
+ ensureFormatterNotNull(formatter);
+ return value(formatter.print(value));
}
- public XContentBuilder value(int value) throws IOException {
- generator.writeNumber(value);
- return this;
+ public XContentBuilder field(String name, Date value) throws IOException {
+ return field(name).value(value);
}
- public XContentBuilder value(Long value) throws IOException {
+ public XContentBuilder field(String name, Date value, DateTimeFormatter formatter) throws IOException {
+ return field(name).value(value, formatter);
+ }
+
+ public XContentBuilder value(Date value) throws IOException {
+ return value(value, DEFAULT_DATE_PRINTER);
+ }
+
+ public XContentBuilder value(Date value, DateTimeFormatter formatter) throws IOException {
if (value == null) {
return nullValue();
}
- return value(value.longValue());
+ return value(formatter, value.getTime());
}
- public XContentBuilder value(long value) throws IOException {
- generator.writeNumber(value);
+ public XContentBuilder dateField(String name, String readableName, long value) throws IOException {
+ if (humanReadable) {
+ field(readableName).value(DEFAULT_DATE_PRINTER, value);
+ }
+ field(name, value);
return this;
}
- public XContentBuilder value(Float value) throws IOException {
+ XContentBuilder value(Calendar value) throws IOException {
if (value == null) {
return nullValue();
}
- return value(value.floatValue());
+ return value(DEFAULT_DATE_PRINTER, value.getTimeInMillis());
}
- public XContentBuilder value(float value) throws IOException {
- generator.writeNumber(value);
- return this;
+ XContentBuilder value(DateTimeFormatter formatter, long value) throws IOException {
+ ensureFormatterNotNull(formatter);
+ return value(formatter.print(value));
}
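A hedged sketch of the date path above: a Date (or ReadableInstant/Calendar) is printed through DEFAULT_DATE_PRINTER and written as a string, assuming DEFAULT_DATE_PRINTER is the same ISO-8601 printer the old defaultDatePrinter was:

    XContentBuilder builder = XContentFactory.jsonBuilder();
    builder.startObject()
           .field("timestamp", new Date(0L)) // delegates to value(DEFAULT_DATE_PRINTER, 0L)
           .endObject();
    // -> {"timestamp":"1970-01-01T00:00:00.000Z"} (exact form depends on the printer)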
- public XContentBuilder value(Double value) throws IOException {
+ ////////////////////////////////////////////////////////////////////////////
+ // GeoPoint & LatLon
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, GeoPoint value) throws IOException {
+ return field(name).value(value);
+ }
+
+ public XContentBuilder value(GeoPoint value) throws IOException {
if (value == null) {
return nullValue();
}
- return value(value.doubleValue());
+ return latlon(value.getLat(), value.getLon());
}
- public XContentBuilder value(double value) throws IOException {
- generator.writeNumber(value);
- return this;
+ public XContentBuilder latlon(String name, double lat, double lon) throws IOException {
+ return field(name).latlon(lat, lon);
}
- public XContentBuilder value(String value) throws IOException {
+ public XContentBuilder latlon(double lat, double lon) throws IOException {
+ return startObject().field("lat", lat).field("lon", lon).endObject();
+ }
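For reference, a minimal sketch of what the latlon helpers emit, using only the chained calls shown above (the field name is illustrative):

    builder.startObject()
           .latlon("location", 41.12, -71.34)
           .endObject();
    // -> {"location":{"lat":41.12,"lon":-71.34}}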
+
+ ////////////////////////////////////////////////////////////////////////////
+ // Path
+ //////////////////////////////////
+
+ public XContentBuilder value(Path value) throws IOException {
if (value == null) {
return nullValue();
}
- generator.writeString(value);
- return this;
+ return value(value.toString());
}
- public XContentBuilder value(byte[] value) throws IOException {
- if (value == null) {
+ ////////////////////////////////////////////////////////////////////////////
+ // Objects
+ //
+    // These methods are used when the type of the value is unknown. They try to fall
+    // back on the typed methods and use Object.toString() as a last resort. Always
+    // prefer the typed methods over these.
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, Object value) throws IOException {
+ return field(name).value(value);
+ }
+
+ public XContentBuilder array(String name, Object... values) throws IOException {
+ return field(name).values(values);
+ }
+
+ XContentBuilder values(Object[] values) throws IOException {
+ if (values == null) {
return nullValue();
}
- generator.writeBinary(value);
+ startArray();
+ for (Object o : values) {
+ value(o);
+ }
+ endArray();
return this;
}
- public XContentBuilder value(byte[] value, int offset, int length) throws IOException {
+ public XContentBuilder value(Object value) throws IOException {
+ unknownValue(value);
+ return this;
+ }
+
+ private void unknownValue(Object value) throws IOException {
if (value == null) {
- return nullValue();
+ nullValue();
+ return;
+ }
+ Writer writer = WRITERS.get(value.getClass());
+ if (writer != null) {
+ writer.write(this, value);
+ } else if (value instanceof Path) {
+            // Path implements Iterable<Path> and would cause endless recursion and a StackOverflowError if treated as an Iterable here
+ value((Path) value);
+ } else if (value instanceof Map) {
+ map((Map) value);
+ } else if (value instanceof Iterable) {
+ value((Iterable<?>) value);
+ } else if (value instanceof Object[]) {
+ values((Object[]) value);
+ } else if (value instanceof Calendar) {
+ value((Calendar) value);
+ } else if (value instanceof ReadableInstant) {
+ value((ReadableInstant) value);
+ } else if (value instanceof BytesReference) {
+ value((BytesReference) value);
+ } else if (value instanceof ToXContent) {
+ value((ToXContent) value);
+ } else {
+            // This is a "value" object (like an enum or DistanceUnit); just toString() it
+            // (toString() on an arbitrary Java class can be misleading, but Jackson should really be used in that case)
+ value(Objects.toString(value));
}
- generator.writeBinary(value, offset, length);
- return this;
}
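A sketch of the unknownValue dispatch for a mixed map, assuming the new WRITERS table mirrors the old MAP (the PathUtils and DistanceUnit references are illustrative):

    Map<String, Object> source = new HashMap<>();
    source.put("count", 42);                     // WRITERS hit: Integer writer
    source.put("home", PathUtils.get("/tmp"));   // Path branch: single string value, no recursion
    source.put("tags", Arrays.asList("a", "b")); // Iterable branch: written as an array
    source.put("unit", DistanceUnit.KILOMETERS); // fallback: Objects.toString(value)

    builder.map(source); // each entry goes through field(key) followed by unknownValue(value)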
- /**
- * Writes the binary content of the given BytesRef
- * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
- */
- public XContentBuilder value(BytesRef value) throws IOException {
+ ////////////////////////////////////////////////////////////////////////////
+ // ToXContent
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, ToXContent value) throws IOException {
+ return field(name).value(value);
+ }
+
+ public XContentBuilder field(String name, ToXContent value, ToXContent.Params params) throws IOException {
+ return field(name).value(value, params);
+ }
+
+ private XContentBuilder value(ToXContent value) throws IOException {
+ return value(value, ToXContent.EMPTY_PARAMS);
+ }
+
+ private XContentBuilder value(ToXContent value, ToXContent.Params params) throws IOException {
if (value == null) {
return nullValue();
}
- generator.writeBinary(value.bytes, value.offset, value.length);
+ value.toXContent(this, params);
return this;
}
- /**
- * Writes the binary content of the given BytesReference
- * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back
- */
- public XContentBuilder value(BytesReference value) throws IOException {
- if (value == null) {
+ ////////////////////////////////////////////////////////////////////////////
+ // Maps & Iterable
+ //////////////////////////////////
+
+ public XContentBuilder field(String name, Map<String, Object> values) throws IOException {
+ return field(name).map(values);
+ }
+
+ public XContentBuilder map(Map<String, ?> values) throws IOException {
+ if (values == null) {
return nullValue();
}
- BytesRef ref = value.toBytesRef();
- generator.writeBinary(ref.bytes, ref.offset, ref.length);
+ startObject();
+ for (Map.Entry<String, ?> value : values.entrySet()) {
+ field(value.getKey());
+ unknownValue(value.getValue());
+ }
+ endObject();
return this;
}
- public XContentBuilder value(Text value) throws IOException {
- if (value == null) {
+ public XContentBuilder field(String name, Iterable<?> values) throws IOException {
+ return field(name).value(values);
+ }
+
+ private XContentBuilder value(Iterable<?> values) throws IOException {
+ if (values == null) {
return nullValue();
- } else if (value.hasString()) {
- generator.writeString(value.string());
+ }
+
+ if (values instanceof Path) {
+            // treat as a single value
+ value((Path) values);
} else {
- BytesRef bytesRef = value.bytes().toBytesRef();
- generator.writeUTF8String(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ startArray();
+ for (Object value : values) {
+ unknownValue(value);
+ }
+ endArray();
}
return this;
}
- public XContentBuilder map(Map<String, ?> map) throws IOException {
- if (map == null) {
- return nullValue();
+ ////////////////////////////////////////////////////////////////////////////
+ // Misc.
+ //////////////////////////////////
+
+ public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, TimeValue timeValue) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, timeValue.toString());
}
- writeMap(map);
+ field(rawFieldName, timeValue.millis());
return this;
}
- public XContentBuilder value(Map<String, Object> map) throws IOException {
- if (map == null) {
- return nullValue();
+ public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, long rawTime) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, new TimeValue(rawTime).toString());
}
- writeMap(map);
+ field(rawFieldName, rawTime);
return this;
}
- public XContentBuilder value(Iterable<?> value) throws IOException {
- if (value == null) {
- return nullValue();
+ public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, long rawTime, TimeUnit timeUnit) throws
+ IOException {
+ if (humanReadable) {
+ field(readableFieldName, new TimeValue(rawTime, timeUnit).toString());
}
- if (value instanceof Path) {
- //treat as single value
- writeValue(value);
- } else {
- startArray();
- for (Object o : value) {
- value(o);
- }
- endArray();
+ field(rawFieldName, rawTime);
+ return this;
+ }
+
+ public XContentBuilder percentageField(String rawFieldName, String readableFieldName, double percentage) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, String.format(Locale.ROOT, "%1.1f%%", percentage));
}
+ field(rawFieldName, percentage);
return this;
}
- public XContentBuilder latlon(String name, double lat, double lon) throws IOException {
- return startObject(name).field("lat", lat).field("lon", lon).endObject();
+ public XContentBuilder byteSizeField(String rawFieldName, String readableFieldName, ByteSizeValue byteSizeValue) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, byteSizeValue.toString());
+ }
+ field(rawFieldName, byteSizeValue.getBytes());
+ return this;
}
- public XContentBuilder latlon(double lat, double lon) throws IOException {
- return startObject().field("lat", lat).field("lon", lon).endObject();
+ public XContentBuilder byteSizeField(String rawFieldName, String readableFieldName, long rawSize) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, new ByteSizeValue(rawSize).toString());
+ }
+ field(rawFieldName, rawSize);
+ return this;
+ }
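A hedged sketch of the humanReadable pattern shared by the timeValue/percentage/byteSize helpers: with humanReadable(true) set on the builder, a pre-rendered field is written next to the raw one:

    builder.humanReadable(true);
    builder.startObject()
           .byteSizeField("size_in_bytes", "size", new ByteSizeValue(1024))
           .endObject();
    // -> {"size":"1kb","size_in_bytes":1024}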
+
+ ////////////////////////////////////////////////////////////////////////////
+ // Raw fields
+ //////////////////////////////////
+
+ public XContentBuilder rawField(String name, InputStream value) throws IOException {
+ generator.writeRawField(name, value);
+ return this;
+ }
+
+ public XContentBuilder rawField(String name, BytesReference value) throws IOException {
+ generator.writeRawField(name, value);
+ return this;
+ }
+
+ public XContentBuilder rawValue(BytesReference value) throws IOException {
+ generator.writeRawValue(value);
+ return this;
}
public XContentBuilder copyCurrentStructure(XContentParser parser) throws IOException {
@@ -756,9 +968,9 @@ public final class XContentBuilder implements BytesStream, Releasable {
return this;
}
- public XContentBuilder flush() throws IOException {
+ @Override
+ public void flush() throws IOException {
generator.flush();
- return this;
}
@Override
@@ -766,7 +978,7 @@ public final class XContentBuilder implements BytesStream, Releasable {
try {
generator.close();
} catch (IOException e) {
- throw new IllegalStateException("failed to close the XContentBuilder", e);
+ throw new IllegalStateException("Failed to close the XContentBuilder", e);
}
}
@@ -784,156 +996,20 @@ public final class XContentBuilder implements BytesStream, Releasable {
* Returns a string representation of the builder (only applicable for text based xcontent).
*/
public String string() throws IOException {
- close();
return bytes().utf8ToString();
}
-
- private void writeMap(Map<String, ?> map) throws IOException {
- generator.writeStartObject();
-
- for (Map.Entry<String, ?> entry : map.entrySet()) {
- field(entry.getKey());
- Object value = entry.getValue();
- if (value == null) {
- generator.writeNull();
- } else {
- writeValue(value);
- }
- }
- generator.writeEndObject();
+ static void ensureNameNotNull(String name) {
+ ensureNotNull(name, "Field name cannot be null");
}
- @FunctionalInterface
- interface Writer {
- void write(XContentGenerator g, Object v) throws IOException;
+ static void ensureFormatterNotNull(DateTimeFormatter formatter) {
+ ensureNotNull(formatter, "DateTimeFormatter cannot be null");
}
- private static final Map<Class<?>, Writer> MAP;
-
- static {
- Map<Class<?>, Writer> map = new HashMap<>();
- map.put(String.class, (g, v) -> g.writeString((String) v));
- map.put(Integer.class, (g, v) -> g.writeNumber((Integer) v));
- map.put(Long.class, (g, v) -> g.writeNumber((Long) v));
- map.put(Float.class, (g, v) -> g.writeNumber((Float) v));
- map.put(Double.class, (g, v) -> g.writeNumber((Double) v));
- map.put(Byte.class, (g, v) -> g.writeNumber((Byte) v));
- map.put(Short.class, (g, v) -> g.writeNumber((Short) v));
- map.put(Boolean.class, (g, v) -> g.writeBoolean((Boolean) v));
- map.put(GeoPoint.class, (g, v) -> {
- g.writeStartObject();
- g.writeNumberField("lat", ((GeoPoint) v).lat());
- g.writeNumberField("lon", ((GeoPoint) v).lon());
- g.writeEndObject();
- });
- map.put(int[].class, (g, v) -> {
- g.writeStartArray();
- for (int item : (int[]) v) {
- g.writeNumber(item);
- }
- g.writeEndArray();
- });
- map.put(long[].class, (g, v) -> {
- g.writeStartArray();
- for (long item : (long[]) v) {
- g.writeNumber(item);
- }
- g.writeEndArray();
- });
- map.put(float[].class, (g, v) -> {
- g.writeStartArray();
- for (float item : (float[]) v) {
- g.writeNumber(item);
- }
- g.writeEndArray();
- });
- map.put(double[].class, (g, v) -> {
- g.writeStartArray();
- for (double item : (double[])v) {
- g.writeNumber(item);
- }
- g.writeEndArray();
- });
- map.put(byte[].class, (g, v) -> g.writeBinary((byte[]) v));
- map.put(short[].class, (g, v) -> {
- g.writeStartArray();
- for (short item : (short[])v) {
- g.writeNumber(item);
- }
- g.writeEndArray();
- });
- map.put(BytesRef.class, (g, v) -> {
- BytesRef bytes = (BytesRef) v;
- g.writeBinary(bytes.bytes, bytes.offset, bytes.length);
- });
- map.put(Text.class, (g, v) -> {
- Text text = (Text) v;
- if (text.hasString()) {
- g.writeString(text.string());
- } else {
- BytesRef ref = text.bytes().toBytesRef();
- g.writeUTF8String(ref.bytes, ref.offset, ref.length);
- }
- });
- MAP = Collections.unmodifiableMap(map);
- }
-
- private void writeValue(Object value) throws IOException {
+ static void ensureNotNull(Object value, String message) {
if (value == null) {
- generator.writeNull();
- return;
+ throw new IllegalArgumentException(message);
}
- Class<?> type = value.getClass();
- Writer writer = MAP.get(type);
- if (writer != null) {
- writer.write(generator, value);
- } else if (value instanceof Map) {
- writeMap((Map) value);
- } else if (value instanceof Path) {
- //Path implements Iterable<Path> and causes endless recursion and a StackOverFlow if treated as an Iterable here
- generator.writeString(value.toString());
- } else if (value instanceof Iterable) {
- writeIterable((Iterable<?>) value);
- } else if (value instanceof Object[]) {
- writeObjectArray((Object[]) value);
- } else if (value instanceof Date) {
- generator.writeString(XContentBuilder.defaultDatePrinter.print(((Date) value).getTime()));
- } else if (value instanceof Calendar) {
- generator.writeString(XContentBuilder.defaultDatePrinter.print((((Calendar) value)).getTimeInMillis()));
- } else if (value instanceof ReadableInstant) {
- generator.writeString(XContentBuilder.defaultDatePrinter.print((((ReadableInstant) value)).getMillis()));
- } else if (value instanceof BytesReference) {
- writeBytesReference((BytesReference) value);
- } else if (value instanceof ToXContent) {
- ((ToXContent) value).toXContent(this, ToXContent.EMPTY_PARAMS);
- } else {
- // if this is a "value" object, like enum, DistanceUnit, ..., just toString it
- // yea, it can be misleading when toString a Java class, but really, jackson should be used in that case
- generator.writeString(value.toString());
- //throw new ElasticsearchIllegalArgumentException("type not supported for generic value conversion: " + type);
- }
- }
-
- private void writeBytesReference(BytesReference value) throws IOException {
- BytesRef ref = value.toBytesRef();
- generator.writeBinary(ref.bytes, ref.offset, ref.length);
- }
-
- private void writeIterable(Iterable<?> value) throws IOException {
- generator.writeStartArray();
- for (Object v : value) {
- writeValue(v);
- }
- generator.writeEndArray();
- }
-
- private void writeObjectArray(Object[] value) throws IOException {
- generator.writeStartArray();
- for (Object v : value) {
- writeValue(v);
- }
- generator.writeEndArray();
}
-
}
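Taken together, the reorganized builder keeps the same fluent surface; a minimal usage sketch (values are illustrative):

    XContentBuilder builder = XContentFactory.jsonBuilder();
    builder.startObject()
           .field("name", "kimchy")
           .array("counts", new int[]{1, 2, 3}) // array(...) replaces the old field(String, int...) overloads
           .nullField("alias")
           .endObject();
    String json = builder.string(); // note: string() no longer closes the builder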
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
index a2cceae836..8d1b8efef5 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
@@ -20,14 +20,13 @@
package org.elasticsearch.common.xcontent;
import org.elasticsearch.common.bytes.BytesReference;
+
import java.io.Closeable;
+import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;
-/**
- *
- */
-public interface XContentGenerator extends Closeable {
+public interface XContentGenerator extends Closeable, Flushable {
XContentType contentType();
@@ -37,68 +36,62 @@ public interface XContentGenerator extends Closeable {
void usePrintLineFeedAtEnd();
- void writeStartArray() throws IOException;
-
- void writeEndArray() throws IOException;
-
void writeStartObject() throws IOException;
void writeEndObject() throws IOException;
- void writeFieldName(String name) throws IOException;
+ void writeStartArray() throws IOException;
- void writeString(String text) throws IOException;
+ void writeEndArray() throws IOException;
- void writeString(char[] text, int offset, int len) throws IOException;
+ void writeFieldName(String name) throws IOException;
+
+ void writeNull() throws IOException;
- void writeUTF8String(byte[] text, int offset, int length) throws IOException;
+ void writeNullField(String name) throws IOException;
- void writeBinary(byte[] data, int offset, int len) throws IOException;
+ void writeBooleanField(String name, boolean value) throws IOException;
- void writeBinary(byte[] data) throws IOException;
+ void writeBoolean(boolean value) throws IOException;
- void writeNumber(int v) throws IOException;
+ void writeNumberField(String name, double value) throws IOException;
- void writeNumber(long v) throws IOException;
+ void writeNumber(double value) throws IOException;
- void writeNumber(double d) throws IOException;
+ void writeNumberField(String name, float value) throws IOException;
- void writeNumber(float f) throws IOException;
+ void writeNumber(float value) throws IOException;
- void writeBoolean(boolean state) throws IOException;
+ void writeNumberField(String name, int value) throws IOException;
- void writeNull() throws IOException;
+ void writeNumber(int value) throws IOException;
- void writeStringField(String fieldName, String value) throws IOException;
+ void writeNumberField(String name, long value) throws IOException;
- void writeBooleanField(String fieldName, boolean value) throws IOException;
+ void writeNumber(long value) throws IOException;
- void writeNullField(String fieldName) throws IOException;
+ void writeNumber(short value) throws IOException;
- void writeNumberField(String fieldName, int value) throws IOException;
+ void writeStringField(String name, String value) throws IOException;
- void writeNumberField(String fieldName, long value) throws IOException;
+ void writeString(String value) throws IOException;
- void writeNumberField(String fieldName, double value) throws IOException;
+ void writeString(char[] text, int offset, int len) throws IOException;
- void writeNumberField(String fieldName, float value) throws IOException;
+ void writeUTF8String(byte[] value, int offset, int length) throws IOException;
- void writeBinaryField(String fieldName, byte[] data) throws IOException;
+ void writeBinaryField(String name, byte[] value) throws IOException;
- void writeArrayFieldStart(String fieldName) throws IOException;
+ void writeBinary(byte[] value) throws IOException;
- void writeObjectFieldStart(String fieldName) throws IOException;
+ void writeBinary(byte[] value, int offset, int length) throws IOException;
- void writeRawField(String fieldName, InputStream content) throws IOException;
+ void writeRawField(String name, InputStream value) throws IOException;
- void writeRawField(String fieldName, BytesReference content) throws IOException;
+ void writeRawField(String name, BytesReference value) throws IOException;
- void writeRawValue(BytesReference content) throws IOException;
+ void writeRawValue(BytesReference value) throws IOException;
void copyCurrentStructure(XContentParser parser) throws IOException;
- void flush() throws IOException;
-
- @Override
- void close() throws IOException;
}
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java
index c8715d03c3..4224b5328a 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java
@@ -35,6 +35,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
+import java.util.Set;
/**
* A CBOR based content implementation using Jackson.
@@ -70,8 +71,8 @@ public class CborXContent implements XContent {
}
@Override
- public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
- return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
+ public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
+ return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java
index 9ec690f2d1..e63a928109 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java
@@ -20,23 +20,22 @@
package org.elasticsearch.common.xcontent.cbor;
import com.fasterxml.jackson.core.JsonGenerator;
-
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
import java.io.OutputStream;
+import java.util.Collections;
+import java.util.Set;
-/**
- *
- */
public class CborXContentGenerator extends JsonXContentGenerator {
- public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
- this(jsonGenerator, os, filters, true);
+ public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
+ this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
}
- public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
- super(jsonGenerator, os, filters, inclusive);
+ public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
+ super(jsonGenerator, os, includes, excludes);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
index 4828d8a752..792a54bf8c 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
@@ -35,6 +35,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
+import java.util.Set;
/**
* A JSON based content implementation using Jackson.
@@ -46,30 +47,10 @@ public class JsonXContent implements XContent {
}
private static final JsonFactory jsonFactory;
- public static final String JSON_ALLOW_UNQUOTED_FIELD_NAMES = "elasticsearch.json.allow_unquoted_field_names";
public static final JsonXContent jsonXContent;
- public static final boolean unquotedFieldNamesSet;
static {
jsonFactory = new JsonFactory();
- // TODO: Remove the system property configuration for this in Elasticsearch 6.0.0
- String jsonUnquoteProp = System.getProperty(JSON_ALLOW_UNQUOTED_FIELD_NAMES);
- if (jsonUnquoteProp == null) {
- unquotedFieldNamesSet = false;
- jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, false);
- } else {
- unquotedFieldNamesSet = true;
- switch (jsonUnquoteProp) {
- case "true":
- jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
- break;
- case "false":
- jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, false);
- break;
- default:
- throw new IllegalArgumentException("invalid value for [" + JSON_ALLOW_UNQUOTED_FIELD_NAMES + "]: " + jsonUnquoteProp);
- }
- }
jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true);
jsonFactory.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
jsonFactory.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
@@ -92,8 +73,8 @@ public class JsonXContent implements XContent {
}
@Override
- public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
- return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
+ public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
+ return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java
index dd95e0d1df..74e1cb5e58 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java
@@ -27,10 +27,10 @@ import com.fasterxml.jackson.core.io.SerializedString;
import com.fasterxml.jackson.core.json.JsonWriteContext;
import com.fasterxml.jackson.core.util.DefaultIndenter;
import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;
+import com.fasterxml.jackson.core.util.JsonGeneratorDelegate;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentGenerator;
@@ -43,10 +43,10 @@ import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.Collections;
+import java.util.Objects;
+import java.util.Set;
-/**
- *
- */
public class JsonXContentGenerator implements XContentGenerator {
/** Generator used to write content **/
@@ -72,23 +72,38 @@ public class JsonXContentGenerator implements XContentGenerator {
private static final DefaultPrettyPrinter.Indenter INDENTER = new DefaultIndenter(" ", LF.getValue());
private boolean prettyPrint = false;
- public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
+ public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
+ this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
+ }
+
+ public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
+ Objects.requireNonNull(includes, "Including filters must not be null");
+ Objects.requireNonNull(excludes, "Excluding filters must not be null");
+ this.os = os;
if (jsonGenerator instanceof GeneratorBase) {
this.base = (GeneratorBase) jsonGenerator;
} else {
this.base = null;
}
- if (CollectionUtils.isEmpty(filters)) {
- this.generator = jsonGenerator;
- this.filter = null;
- } else {
- this.filter = new FilteringGeneratorDelegate(jsonGenerator,
- new FilterPathBasedFilter(filters, inclusive), true, true);
- this.generator = this.filter;
+ JsonGenerator generator = jsonGenerator;
+
+ boolean hasExcludes = excludes.isEmpty() == false;
+ if (hasExcludes) {
+ generator = new FilteringGeneratorDelegate(generator, new FilterPathBasedFilter(excludes, false), true, true);
}
- this.os = os;
+ boolean hasIncludes = includes.isEmpty() == false;
+ if (hasIncludes) {
+ generator = new FilteringGeneratorDelegate(generator, new FilterPathBasedFilter(includes, true), true, true);
+ }
+
+ if (hasExcludes || hasIncludes) {
+ this.filter = (FilteringGeneratorDelegate) generator;
+ } else {
+ this.filter = null;
+ }
+ this.generator = generator;
}
@Override
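A hedged usage sketch of the new include/exclude wiring: the exclusion filter wraps the raw generator first and the inclusion filter wraps that, so a path must pass both to be written (paths and names are illustrative):

    Set<String> includes = Collections.singleton("user.*");
    Set<String> excludes = Collections.singleton("user.password");

    OutputStream os = new ByteArrayOutputStream();
    XContentGenerator generator = JsonXContent.jsonXContent.createGenerator(os, includes, excludes);
    generator.writeStartObject();                      // root object bypasses the filters
    generator.writeFieldName("user");
    generator.writeStartObject();
    generator.writeStringField("name", "kimchy");      // kept: matches user.*, not excluded
    generator.writeStringField("password", "secret");  // dropped by the exclusion filter
    generator.writeEndObject();
    generator.writeEndObject();
    generator.close();
    // os now holds {"user":{"name":"kimchy"}}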
@@ -112,33 +127,34 @@ public class JsonXContentGenerator implements XContentGenerator {
writeLineFeedAtEnd = true;
}
- @Override
- public void writeStartArray() throws IOException {
- generator.writeStartArray();
- }
-
- @Override
- public void writeEndArray() throws IOException {
- generator.writeEndArray();
- }
-
- protected boolean isFiltered() {
+ private boolean isFiltered() {
return filter != null;
}
- protected boolean inRoot() {
+ private JsonGenerator getLowLevelGenerator() {
if (isFiltered()) {
- JsonStreamContext context = filter.getFilterContext();
- return ((context != null) && (context.inRoot() && context.getCurrentName() == null));
+ JsonGenerator delegate = filter.getDelegate();
+ if (delegate instanceof JsonGeneratorDelegate) {
+                // When inclusion and exclusion filters are combined, there is exactly one additional delegating level
+ delegate = ((JsonGeneratorDelegate) delegate).getDelegate();
+ assert delegate instanceof JsonGeneratorDelegate == false;
+ }
+ return delegate;
}
- return false;
+ return generator;
+ }
+
+ private boolean inRoot() {
+ JsonStreamContext context = generator.getOutputContext();
+ return ((context != null) && (context.inRoot() && context.getCurrentName() == null));
}
@Override
public void writeStartObject() throws IOException {
- if (isFiltered() && inRoot()) {
- // Bypass generator to always write the root start object
- filter.getDelegate().writeStartObject();
+ if (inRoot()) {
+ // Use the low level generator to write the startObject so that the root
+ // start object is always written even if a filtered generator is used
+ getLowLevelGenerator().writeStartObject();
return;
}
generator.writeStartObject();
@@ -146,126 +162,133 @@ public class JsonXContentGenerator implements XContentGenerator {
@Override
public void writeEndObject() throws IOException {
- if (isFiltered() && inRoot()) {
- // Bypass generator to always write the root end object
- filter.getDelegate().writeEndObject();
+ if (inRoot()) {
+            // Use the low level generator to write the endObject so that the root
+            // end object is always written even if a filtered generator is used
+ getLowLevelGenerator().writeEndObject();
return;
}
generator.writeEndObject();
}
+
@Override
- public void writeFieldName(String name) throws IOException {
- generator.writeFieldName(name);
+ public void writeStartArray() throws IOException {
+ generator.writeStartArray();
}
@Override
- public void writeString(String text) throws IOException {
- generator.writeString(text);
+ public void writeEndArray() throws IOException {
+ generator.writeEndArray();
}
@Override
- public void writeString(char[] text, int offset, int len) throws IOException {
- generator.writeString(text, offset, len);
+ public void writeFieldName(String name) throws IOException {
+ generator.writeFieldName(name);
}
@Override
- public void writeUTF8String(byte[] text, int offset, int length) throws IOException {
- generator.writeUTF8String(text, offset, length);
+ public void writeNull() throws IOException {
+ generator.writeNull();
}
@Override
- public void writeBinary(byte[] data, int offset, int len) throws IOException {
- generator.writeBinary(data, offset, len);
+ public void writeNullField(String name) throws IOException {
+ generator.writeNullField(name);
}
@Override
- public void writeBinary(byte[] data) throws IOException {
- generator.writeBinary(data);
+ public void writeBooleanField(String name, boolean value) throws IOException {
+ generator.writeBooleanField(name, value);
}
@Override
- public void writeNumber(int v) throws IOException {
- generator.writeNumber(v);
+ public void writeBoolean(boolean value) throws IOException {
+ generator.writeBoolean(value);
}
@Override
- public void writeNumber(long v) throws IOException {
- generator.writeNumber(v);
+ public void writeNumberField(String name, double value) throws IOException {
+ generator.writeNumberField(name, value);
}
@Override
- public void writeNumber(double d) throws IOException {
- generator.writeNumber(d);
+ public void writeNumber(double value) throws IOException {
+ generator.writeNumber(value);
}
@Override
- public void writeNumber(float f) throws IOException {
- generator.writeNumber(f);
+ public void writeNumberField(String name, float value) throws IOException {
+ generator.writeNumberField(name, value);
}
@Override
- public void writeBoolean(boolean state) throws IOException {
- generator.writeBoolean(state);
+ public void writeNumber(float value) throws IOException {
+ generator.writeNumber(value);
}
@Override
- public void writeNull() throws IOException {
- generator.writeNull();
+ public void writeNumberField(String name, int value) throws IOException {
+ generator.writeNumberField(name, value);
+ }
+
+ @Override
+ public void writeNumber(int value) throws IOException {
+ generator.writeNumber(value);
}
@Override
- public void writeStringField(String fieldName, String value) throws IOException {
- generator.writeStringField(fieldName, value);
+ public void writeNumberField(String name, long value) throws IOException {
+ generator.writeNumberField(name, value);
}
@Override
- public void writeBooleanField(String fieldName, boolean value) throws IOException {
- generator.writeBooleanField(fieldName, value);
+ public void writeNumber(long value) throws IOException {
+ generator.writeNumber(value);
}
@Override
- public void writeNullField(String fieldName) throws IOException {
- generator.writeNullField(fieldName);
+ public void writeNumber(short value) throws IOException {
+ generator.writeNumber(value);
}
@Override
- public void writeNumberField(String fieldName, int value) throws IOException {
- generator.writeNumberField(fieldName, value);
+ public void writeStringField(String name, String value) throws IOException {
+ generator.writeStringField(name, value);
}
@Override
- public void writeNumberField(String fieldName, long value) throws IOException {
- generator.writeNumberField(fieldName, value);
+ public void writeString(String value) throws IOException {
+ generator.writeString(value);
}
@Override
- public void writeNumberField(String fieldName, double value) throws IOException {
- generator.writeNumberField(fieldName, value);
+ public void writeString(char[] value, int offset, int len) throws IOException {
+ generator.writeString(value, offset, len);
}
@Override
- public void writeNumberField(String fieldName, float value) throws IOException {
- generator.writeNumberField(fieldName, value);
+ public void writeUTF8String(byte[] value, int offset, int length) throws IOException {
+ generator.writeUTF8String(value, offset, length);
}
@Override
- public void writeBinaryField(String fieldName, byte[] data) throws IOException {
- generator.writeBinaryField(fieldName, data);
+ public void writeBinaryField(String name, byte[] value) throws IOException {
+ generator.writeBinaryField(name, value);
}
@Override
- public void writeArrayFieldStart(String fieldName) throws IOException {
- generator.writeArrayFieldStart(fieldName);
+ public void writeBinary(byte[] value) throws IOException {
+ generator.writeBinary(value);
}
@Override
- public void writeObjectFieldStart(String fieldName) throws IOException {
- generator.writeObjectFieldStart(fieldName);
+ public void writeBinary(byte[] value, int offset, int len) throws IOException {
+ generator.writeBinary(value, offset, len);
}
- private void writeStartRaw(String fieldName) throws IOException {
- writeFieldName(fieldName);
+ private void writeStartRaw(String name) throws IOException {
+ writeFieldName(name);
generator.writeRaw(':');
}
@@ -279,7 +302,7 @@ public class JsonXContentGenerator implements XContentGenerator {
}
@Override
- public void writeRawField(String fieldName, InputStream content) throws IOException {
+ public void writeRawField(String name, InputStream content) throws IOException {
if (content.markSupported() == false) {
// needed for the XContentFactory.xContentType call
content = new BufferedInputStream(content);
@@ -291,11 +314,11 @@ public class JsonXContentGenerator implements XContentGenerator {
if (mayWriteRawData(contentType) == false) {
try (XContentParser parser = XContentFactory.xContent(contentType).createParser(content)) {
parser.nextToken();
- writeFieldName(fieldName);
+ writeFieldName(name);
copyCurrentStructure(parser);
}
} else {
- writeStartRaw(fieldName);
+ writeStartRaw(name);
flush();
Streams.copy(content, os);
writeEndRaw();
@@ -303,16 +326,16 @@ public class JsonXContentGenerator implements XContentGenerator {
}
@Override
- public final void writeRawField(String fieldName, BytesReference content) throws IOException {
+ public final void writeRawField(String name, BytesReference content) throws IOException {
XContentType contentType = XContentFactory.xContentType(content);
if (contentType == null) {
throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed");
}
if (mayWriteRawData(contentType) == false) {
- writeFieldName(fieldName);
+ writeFieldName(name);
copyRawValue(content, contentType.xContent());
} else {
- writeStartRaw(fieldName);
+ writeStartRaw(name);
flush();
content.writeTo(os);
writeEndRaw();
@@ -386,11 +409,12 @@ public class JsonXContentGenerator implements XContentGenerator {
}
JsonStreamContext context = generator.getOutputContext();
if ((context != null) && (context.inRoot() == false)) {
- throw new IOException("unclosed object or array found");
+ throw new IOException("Unclosed object or array found");
}
if (writeLineFeedAtEnd) {
flush();
- generator.writeRaw(LF);
+ // Bypass generator to always write the line feed
+ getLowLevelGenerator().writeRaw(LF);
}
generator.close();
}
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
index 629f8612ea..94ac9b9435 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
@@ -35,6 +35,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
+import java.util.Set;
/**
* A Smile based content implementation using Jackson.
@@ -71,8 +72,8 @@ public class SmileXContent implements XContent {
}
@Override
- public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
- return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
+ public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
+ return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
index ac294c1db8..afa420805f 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
@@ -20,23 +20,22 @@
package org.elasticsearch.common.xcontent.smile;
import com.fasterxml.jackson.core.JsonGenerator;
-
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
import java.io.OutputStream;
+import java.util.Collections;
+import java.util.Set;
-/**
- *
- */
public class SmileXContentGenerator extends JsonXContentGenerator {
- public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
- this(jsonGenerator, os, filters, true);
+ public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
+ this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
}
- public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
- super(jsonGenerator, os, filters, inclusive);
+ public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
+ super(jsonGenerator, os, includes, excludes);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
index eaaa98167b..9f313a59b9 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
@@ -300,16 +300,7 @@ public abstract class AbstractXContentParser implements XContentParser {
} else if (token == XContentParser.Token.VALUE_STRING) {
return parser.text();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
- XContentParser.NumberType numberType = parser.numberType();
- if (numberType == XContentParser.NumberType.INT) {
- return parser.intValue();
- } else if (numberType == XContentParser.NumberType.LONG) {
- return parser.longValue();
- } else if (numberType == XContentParser.NumberType.FLOAT) {
- return parser.floatValue();
- } else if (numberType == XContentParser.NumberType.DOUBLE) {
- return parser.doubleValue();
- }
+ return parser.numberValue();
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
return parser.booleanValue();
} else if (token == XContentParser.Token.START_OBJECT) {
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java
index 9d7961ec0b..a70e385d52 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java
@@ -21,10 +21,10 @@
package org.elasticsearch.common.xcontent.support.filtering;
import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
+import java.util.Set;
public class FilterPath {
@@ -75,8 +75,8 @@ public class FilterPath {
return next;
}
- public static FilterPath[] compile(String... filters) {
- if (CollectionUtils.isEmpty(filters)) {
+ public static FilterPath[] compile(Set<String> filters) {
+ if (filters == null || filters.isEmpty()) {
return null;
}
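
[editor's note] CollectionUtils.isEmpty worked on arrays and has no Set counterpart here, so the guard is inlined. A tiny standalone sketch of the same contract (class and method names hypothetical):

import java.util.Set;

final class FilterGuard {
    // returning null signals "no filtering configured", matching compile()'s contract
    static String[] compileOrNull(Set<String> filters) {
        if (filters == null || filters.isEmpty()) {
            return null;
        }
        return filters.toArray(new String[0]);
    }
}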
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java
index 69e4e79110..846e172ae6 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java
@@ -24,6 +24,7 @@ import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
+import java.util.Set;
public class FilterPathBasedFilter extends TokenFilter {
@@ -53,7 +54,7 @@ public class FilterPathBasedFilter extends TokenFilter {
this.filters = filters;
}
- public FilterPathBasedFilter(String[] filters, boolean inclusive) {
+ public FilterPathBasedFilter(Set<String> filters, boolean inclusive) {
this(FilterPath.compile(filters), inclusive);
}
@@ -103,11 +104,6 @@ public class FilterPathBasedFilter extends TokenFilter {
@Override
protected boolean _includeScalar() {
- for (FilterPath filter : filters) {
- if (filter.matches()) {
- return inclusive;
- }
- }
return !inclusive;
}
}
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
index 27a0dd46e0..54da03118d 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
@@ -34,6 +34,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
+import java.util.Set;
/**
* A YAML based content implementation using Jackson.
@@ -66,8 +67,8 @@ public class YamlXContent implements XContent {
}
@Override
- public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
- return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
+ public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
+ return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
index f801401a22..d2c53c8a02 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
@@ -20,23 +20,22 @@
package org.elasticsearch.common.xcontent.yaml;
import com.fasterxml.jackson.core.JsonGenerator;
-
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
import java.io.OutputStream;
+import java.util.Collections;
+import java.util.Set;
-/**
- *
- */
public class YamlXContentGenerator extends JsonXContentGenerator {
- public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
- this(jsonGenerator, os, filters, true);
+ public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
+ this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
}
- public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
- super(jsonGenerator, os, filters, inclusive);
+ public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
+ super(jsonGenerator, os, includes, excludes);
}
@Override
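
[editor's note] Across all the generator classes in this merge, the old "filter array plus inclusive flag" pair becomes two explicit path sets. A sketch of a call site under the new signature, assuming this commit's XContent API on the classpath ("user.*" is a hypothetical filter path):

import java.io.IOException;
import java.io.OutputStream;
import java.util.Collections;
import java.util.Set;

import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.yaml.YamlXContent;

class FilteredGeneratorSketch {
    static XContentGenerator openFiltered(OutputStream os) throws IOException {
        Set<String> includes = Collections.singleton("user.*"); // keep only this subtree
        Set<String> excludes = Collections.emptySet();          // nothing explicitly dropped
        return YamlXContent.yamlXContent.createGenerator(os, includes, excludes);
    }
}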
diff --git a/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java b/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
index 26e7898959..b432d0538c 100644
--- a/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
+++ b/core/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
@@ -18,8 +18,10 @@
*/
package org.elasticsearch.discovery;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.util.Set;
@@ -31,7 +33,7 @@ import java.util.Set;
*/
public class AckClusterStatePublishResponseHandler extends BlockingClusterStatePublishResponseHandler {
- private static final ESLogger logger = ESLoggerFactory.getLogger(AckClusterStatePublishResponseHandler.class.getName());
+ private static final Logger logger = ESLoggerFactory.getLogger(AckClusterStatePublishResponseHandler.class.getName());
private final Discovery.AckListener ackListener;
@@ -68,7 +70,7 @@ public class AckClusterStatePublishResponseHandler extends BlockingClusterStateP
ackListener.onNodeAck(node, e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.debug("error while processing ack for node [{}]", inner, node);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
}
}
}
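
[editor's note] The logging rewrite that recurs throughout this merge: the old ESLogger overloads took the Throwable in the middle of the argument list, while the log4j 2 API takes a message Supplier plus a trailing Throwable, so the ParameterizedMessage is only built when the level is actually enabled. A self-contained illustration:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyLoggingDemo {
    private static final Logger logger = LogManager.getLogger(LazyLoggingDemo.class);

    void onAckFailure(String node, Exception inner) {
        // the lambda runs only if debug logging is enabled; the exception goes last
        logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                "error while processing ack for node [{}]", node), inner);
    }
}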
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
index 040066adeb..b41316b653 100644
--- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
+++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
@@ -19,7 +19,6 @@
package org.elasticsearch.discovery;
-import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Setting;
@@ -27,8 +26,8 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java
index 74c6c3b7f3..fc419ff06a 100644
--- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java
+++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java
@@ -22,26 +22,34 @@ package org.elasticsearch.discovery;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
import java.io.IOException;
-public class DiscoveryStats implements Streamable, ToXContent {
+public class DiscoveryStats implements Writeable, ToXContent {
@Nullable
- private PendingClusterStateStats queueStats;
+ private final PendingClusterStateStats queueStats;
public DiscoveryStats(PendingClusterStateStats queueStats) {
this.queueStats = queueStats;
}
+ public DiscoveryStats(StreamInput in) throws IOException {
+ queueStats = in.readOptionalWriteable(PendingClusterStateStats::new);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeOptionalWriteable(queueStats);
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.DISCOVERY);
-
if (queueStats != null ){
queueStats.toXContent(builder, params);
}
@@ -49,24 +57,6 @@ public class DiscoveryStats implements Streamable, ToXContent {
return builder;
}
- @Override
- public void readFrom(StreamInput in) throws IOException {
- if (in.readBoolean()) {
- queueStats = new PendingClusterStateStats();
- queueStats.readFrom(in);
- }
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- if (queueStats != null ) {
- out.writeBoolean(true);
- queueStats.writeTo(out);
- }else{
- out.writeBoolean(false);
- }
- }
-
static final class Fields {
static final String DISCOVERY = "discovery";
}
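
[editor's note] Streamable required a no-arg constructor plus a mutable readFrom(); Writeable moves deserialization into a constructor so fields can be final, and readOptionalWriteable/writeOptionalWriteable fold away the hand-rolled null-flag boilerplate shown in the deleted methods. A minimal sketch of the same shape, using plain java.io streams as stand-ins for StreamInput/StreamOutput:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class QueueStats {
    final int pending;
    QueueStats(int pending) { this.pending = pending; }
    QueueStats(DataInputStream in) throws IOException { this.pending = in.readInt(); }
    void writeTo(DataOutputStream out) throws IOException { out.writeInt(pending); }
}

final class StatsSketch {
    private final QueueStats queueStats; // may be null, like the @Nullable field above

    StatsSketch(QueueStats queueStats) { this.queueStats = queueStats; }

    // "reading constructor" style: state stays final, no readFrom() mutation
    StatsSketch(DataInputStream in) throws IOException {
        this.queueStats = in.readBoolean() ? new QueueStats(in) : null;
    }

    // the null flag that writeOptionalWriteable/readOptionalWriteable encapsulate
    void writeTo(DataOutputStream out) throws IOException {
        out.writeBoolean(queueStats != null);
        if (queueStats != null) {
            queueStats.writeTo(out);
        }
    }
}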
diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
index c544db4047..6b943bde78 100644
--- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
@@ -19,6 +19,8 @@
package org.elasticsearch.discovery.local;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@@ -29,7 +31,6 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -45,6 +46,7 @@ import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
+import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
import java.util.HashSet;
import java.util.Optional;
@@ -144,7 +146,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
} else if (firstMaster != null) {
@@ -164,16 +166,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build();
- RoutingAllocation.Result result = master.allocationService.reroute(currentState, "node_add");
- if (result.changed()) {
- currentState = ClusterState.builder(currentState).routingResult(result).build();
- }
- return currentState;
+ return master.allocationService.reroute(currentState, "node_add");
}
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
@@ -231,14 +229,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
}
// reroute here, so we eagerly remove dead nodes from the routing
ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
- RoutingAllocation.Result routingResult = master.allocationService.deassociateDeadNodes(
- ClusterState.builder(updatedState).build(), true, "node stopped");
- return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ return master.allocationService.deassociateDeadNodes(updatedState, true, "node stopped");
}
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
}
@@ -279,7 +275,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
@Override
public DiscoveryStats stats() {
- return new DiscoveryStats(null);
+ return new DiscoveryStats((PendingClusterStateStats)null);
}
@Override
@@ -329,7 +325,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName());
} catch (IncompatibleClusterStateVersionException ex) {
- logger.warn("incompatible cluster state version [{}] - resending complete cluster state", ex, clusterState.version());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex);
}
}
if (newNodeSpecificClusterState == null) {
@@ -380,7 +376,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
publishResponseHandler.onFailure(discovery.localNode(), e);
}
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java
index 3ef9138f93..1d11f5cf0f 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java
@@ -17,11 +17,10 @@
* under the License.
*/
-package org.elasticsearch.discovery.zen.elect;
+package org.elasticsearch.discovery.zen;
import com.carrotsearch.hppc.ObjectContainer;
import org.apache.lucene.util.CollectionUtil;
-import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
@@ -33,9 +32,11 @@ import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Comparator;
+import java.util.Collection;
import java.util.Iterator;
import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
/**
*
@@ -45,17 +46,64 @@ public class ElectMasterService extends AbstractComponent {
public static final Setting<Integer> DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING =
Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope);
- // This is the minimum version a master needs to be on, otherwise it gets ignored
- // This is based on the minimum compatible version of the current version this node is on
- private final Version minMasterVersion;
- private final NodeComparator nodeComparator = new NodeComparator();
-
private volatile int minimumMasterNodes;
+ /**
+ * a class to encapsulate all the information about a candidate in a master election
+ * that is needed to decide which of the candidates should win
+ */
+ public static class MasterCandidate {
+
+ public static final long UNRECOVERED_CLUSTER_VERSION = -1;
+
+ final DiscoveryNode node;
+
+ final long clusterStateVersion;
+
+ public MasterCandidate(DiscoveryNode node, long clusterStateVersion) {
+ Objects.requireNonNull(node);
+ assert clusterStateVersion >= -1 : "got: " + clusterStateVersion;
+ assert node.isMasterNode();
+ this.node = node;
+ this.clusterStateVersion = clusterStateVersion;
+ }
+
+ public DiscoveryNode getNode() {
+ return node;
+ }
+
+ public long getClusterStateVersion() {
+ return clusterStateVersion;
+ }
+
+ @Override
+ public String toString() {
+ return "Candidate{" +
+ "node=" + node +
+ ", clusterStateVersion=" + clusterStateVersion +
+ '}';
+ }
+
+ /**
+ * compares two candidates to indicate which is the better master.
+ * A higher cluster state version is better.
+ *
+ * @return -1 if c1 is the better candidate, 1 if c2 is.
+ */
+ public static int compare(MasterCandidate c1, MasterCandidate c2) {
+ // we explicitly swap c1 and c2 here: the code expects "better" to sort lower in a
+ // list, so if c2 has a higher cluster state version, it needs to come first.
+ int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion);
+ if (ret == 0) {
+ ret = compareNodes(c1.getNode(), c2.getNode());
+ }
+ return ret;
+ }
+ }
+
@Inject
public ElectMasterService(Settings settings) {
super(settings);
- this.minMasterVersion = Version.CURRENT.minimumCompatibilityVersion();
this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
}
@@ -69,16 +117,41 @@ public class ElectMasterService extends AbstractComponent {
}
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
- if (minimumMasterNodes < 1) {
- return true;
- }
int count = 0;
for (DiscoveryNode node : nodes) {
if (node.isMasterNode()) {
count++;
}
}
- return count >= minimumMasterNodes;
+ return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes);
+ }
+
+ public boolean hasEnoughCandidates(Collection<MasterCandidate> candidates) {
+ if (candidates.isEmpty()) {
+ return false;
+ }
+ if (minimumMasterNodes < 1) {
+ return true;
+ }
+ assert candidates.stream().map(MasterCandidate::getNode).collect(Collectors.toSet()).size() == candidates.size() :
+ "duplicates ahead: " + candidates;
+ return candidates.size() >= minimumMasterNodes;
+ }
+
+ /**
+ * Elects a new master out of the possible candidates, returning it. Callers must first
+ * verify there are enough candidates via {@link #hasEnoughCandidates(Collection)}.
+ */
+ public MasterCandidate electMaster(Collection<MasterCandidate> candidates) {
+ assert hasEnoughCandidates(candidates);
+ List<MasterCandidate> sortedCandidates = new ArrayList<>(candidates);
+ sortedCandidates.sort(MasterCandidate::compare);
+ return sortedCandidates.get(0);
+ }
+
+ /** selects the best active master to join, when multiple are discovered */
+ public DiscoveryNode tieBreakActiveMasters(Collection<DiscoveryNode> activeMasters) {
+ return activeMasters.stream().min(ElectMasterService::compareNodes).get();
}
public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {
@@ -107,7 +180,7 @@ public class ElectMasterService extends AbstractComponent {
*/
public List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {
ArrayList<DiscoveryNode> sortedNodes = CollectionUtils.iterableAsArrayList(nodes);
- CollectionUtil.introSort(sortedNodes, nodeComparator);
+ CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes);
return sortedNodes;
}
@@ -130,25 +203,6 @@ public class ElectMasterService extends AbstractComponent {
return nextPossibleMasters.toArray(new DiscoveryNode[nextPossibleMasters.size()]);
}
- /**
- * Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
- * if no master has been elected.
- */
- public DiscoveryNode electMaster(Iterable<DiscoveryNode> nodes) {
- List<DiscoveryNode> sortedNodes = sortedMasterNodes(nodes);
- if (sortedNodes == null || sortedNodes.isEmpty()) {
- return null;
- }
- DiscoveryNode masterNode = sortedNodes.get(0);
- // Sanity check: maybe we don't end up here, because serialization may have failed.
- if (masterNode.getVersion().before(minMasterVersion)) {
- logger.warn("ignoring master [{}], because the version [{}] is lower than the minimum compatible version [{}]", masterNode, masterNode.getVersion(), minMasterVersion);
- return null;
- } else {
- return masterNode;
- }
- }
-
private List<DiscoveryNode> sortedMasterNodes(Iterable<DiscoveryNode> nodes) {
List<DiscoveryNode> possibleNodes = CollectionUtils.iterableAsArrayList(nodes);
if (possibleNodes.isEmpty()) {
@@ -161,21 +215,18 @@ public class ElectMasterService extends AbstractComponent {
it.remove();
}
}
- CollectionUtil.introSort(possibleNodes, nodeComparator);
+ CollectionUtil.introSort(possibleNodes, ElectMasterService::compareNodes);
return possibleNodes;
}
- private static class NodeComparator implements Comparator<DiscoveryNode> {
-
- @Override
- public int compare(DiscoveryNode o1, DiscoveryNode o2) {
- if (o1.isMasterNode() && !o2.isMasterNode()) {
- return -1;
- }
- if (!o1.isMasterNode() && o2.isMasterNode()) {
- return 1;
- }
- return o1.getId().compareTo(o2.getId());
+ /** master nodes go before other nodes, with a secondary sort by id */
+ private static int compareNodes(DiscoveryNode o1, DiscoveryNode o2) {
+ if (o1.isMasterNode() && !o2.isMasterNode()) {
+ return -1;
+ }
+ if (!o1.isMasterNode() && o2.isMasterNode()) {
+ return 1;
}
+ return o1.getId().compareTo(o2.getId());
}
}
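
[editor's note] The reversed arguments in MasterCandidate.compare are easy to get backwards, so here is a self-contained sketch of the ordering with hypothetical node ids: higher cluster state version sorts first, ties broken by id (compareNodes reduced here to plain id comparison):

import java.util.ArrayList;
import java.util.List;

class ElectionSketch {
    static final class Candidate {
        final String nodeId;
        final long clusterStateVersion;
        Candidate(String nodeId, long clusterStateVersion) {
            this.nodeId = nodeId;
            this.clusterStateVersion = clusterStateVersion;
        }
        static int compare(Candidate c1, Candidate c2) {
            // swapped on purpose: higher version must sort first ("better" = lower index)
            int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion);
            return ret != 0 ? ret : c1.nodeId.compareTo(c2.nodeId);
        }
    }

    public static void main(String[] args) {
        List<Candidate> candidates = new ArrayList<>();
        candidates.add(new Candidate("node-b", 5));
        candidates.add(new Candidate("node-c", 7));
        candidates.add(new Candidate("node-a", 7));
        candidates.sort(Candidate::compare);
        // winner: node-a (version 7 beats 5; lower id breaks the tie with node-c)
        System.out.println(candidates.get(0).nodeId);
    }
}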
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java
index 1f4f57c4ed..75cce69535 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java
@@ -18,6 +18,9 @@
*/
package org.elasticsearch.discovery.zen;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -30,16 +33,13 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import java.util.ArrayList;
@@ -348,13 +348,13 @@ public class NodeJoinController extends AbstractComponent {
static class JoinTaskListener implements ClusterStateTaskListener {
final List<MembershipAction.JoinCallback> callbacks;
- private final ESLogger logger;
+ private final Logger logger;
- JoinTaskListener(MembershipAction.JoinCallback callback, ESLogger logger) {
+ JoinTaskListener(MembershipAction.JoinCallback callback, Logger logger) {
this(Collections.singletonList(callback), logger);
}
- JoinTaskListener(List<MembershipAction.JoinCallback> callbacks, ESLogger logger) {
+ JoinTaskListener(List<MembershipAction.JoinCallback> callbacks, Logger logger) {
this.callbacks = callbacks;
this.logger = logger;
}
@@ -365,7 +365,7 @@ public class NodeJoinController extends AbstractComponent {
try {
callback.onFailure(e);
} catch (Exception inner) {
- logger.error("error handling task failure [{}]", inner, e);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner);
}
}
}
@@ -376,7 +376,7 @@ public class NodeJoinController extends AbstractComponent {
try {
callback.onSuccess();
} catch (Exception e) {
- logger.error("unexpected error during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error during [{}]", source), e);
}
}
}
@@ -455,17 +455,12 @@ public class NodeJoinController extends AbstractComponent {
if (nodesChanged) {
newState.nodes(nodesBuilder);
- final ClusterState tmpState = newState.build();
- RoutingAllocation.Result result = allocationService.reroute(tmpState, "node_join");
- newState = ClusterState.builder(tmpState);
- if (result.changed()) {
- newState.routingResult(result);
- }
+ return results.build(allocationService.reroute(newState.build(), "node_join"));
+ } else {
+ // we must return a new cluster state instance to force publishing. This is important
+ // for the joining node to finalize its join and set us as a master
+ return results.build(newState.build());
}
-
- // we must return a new cluster state instance to force publishing. This is important
- // for the joining node to finalize its join and set us as a master
- return results.build(newState.build());
}
private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List<DiscoveryNode> joiningNodes) {
@@ -485,9 +480,8 @@ public class NodeJoinController extends AbstractComponent {
// now trim any left over dead nodes - either left there when the previous master stepped down
// or removed by us above
ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build();
- RoutingAllocation.Result result = allocationService.deassociateDeadNodes(tmpState, false,
- "removed dead nodes on election");
- return ClusterState.builder(tmpState).routingResult(result);
+ return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false,
+ "removed dead nodes on election"));
}
@Override
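
[editor's note] This file (like LocalDiscovery above) depends on an AllocationService API change elsewhere in the merge: reroute and deassociateDeadNodes now return the updated ClusterState directly instead of a RoutingAllocation.Result that each caller had to fold back in. Schematically:

// before: a two-step stitch at every call site
// RoutingAllocation.Result result = allocationService.reroute(state, "node_join");
// if (result.changed()) {
//     state = ClusterState.builder(state).routingResult(result).build();
// }

// after: one call; unchanged states are handled inside the service
ClusterState newState = allocationService.reroute(state, "node_join");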
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
index d380b1fd60..833349e9d9 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -19,9 +19,11 @@
package org.elasticsearch.discovery.zen;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@@ -36,7 +38,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -45,7 +46,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -54,7 +54,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.MasterFaultDetection;
import org.elasticsearch.discovery.zen.fd.NodesFaultDetection;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
@@ -74,13 +73,10 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
@@ -93,7 +89,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope);
public static final Setting<TimeValue> JOIN_TIMEOUT_SETTING =
Setting.timeSetting("discovery.zen.join_timeout",
- settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(),
+ settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20),
TimeValue.timeValueMillis(0), Property.NodeScope);
public static final Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING =
Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, Property.NodeScope);
@@ -105,7 +101,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope);
public static final Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING =
Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
- settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0),
+ settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2), TimeValue.timeValueMillis(0),
Property.NodeScope);
public static final Setting<Boolean> MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING =
Setting.boolSetting("discovery.zen.master_election.ignore_non_master_pings", false, Property.NodeScope);
@@ -144,9 +140,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private final JoinThreadControl joinThreadControl;
- /** counts the time this node has joined the cluster or have elected it self as master */
- private final AtomicLong clusterJoinsCounter = new AtomicLong();
-
// must initialized in doStart(), when we have the allocationService set
private volatile NodeJoinController nodeJoinController;
private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
@@ -259,7 +252,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
try {
membership.sendLeaveRequestBlocking(nodes.getMasterNode(), nodes.getLocalNode(), TimeValue.timeValueSeconds(1));
} catch (Exception e) {
- logger.debug("failed to send leave request to master [{}]", e, nodes.getMasterNode());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e);
}
} else {
// we're master -> let other potential masters know we left and start a master election now rather than wait for masterFD
@@ -271,7 +264,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
try {
membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster);
} catch (Exception e) {
- logger.debug("failed to send leave request from master [{}] to possible master [{}]", e, nodes.getMasterNode(), possibleMaster);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e);
}
}
}
@@ -282,8 +275,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
protected void doClose() {
masterFD.close();
nodesFD.close();
- publishClusterState.close();
- membership.close();
pingService.close();
}
@@ -304,8 +295,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}
@Override
- public boolean nodeHasJoinedClusterOnce() {
- return clusterJoinsCounter.get() > 0;
+ public ClusterState clusterState() {
+ return clusterService.state();
}
/** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@@ -316,7 +307,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (!clusterChangedEvent.state().getNodes().isLocalNodeElectedMaster()) {
throw new IllegalStateException("Shouldn't publish state when not master");
}
- nodesFD.updateNodesAndPing(clusterChangedEvent.state());
+
try {
publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener);
} catch (FailedToCommitClusterStateException t) {
@@ -330,12 +321,23 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
throw t;
}
+
+ // update the set of nodes to ping after the new cluster state has been published
+ nodesFD.updateNodesAndPing(clusterChangedEvent.state());
+ }
+
+ /**
+ * Gets the current set of nodes involved in the node fault detection.
+ * NB: for testing purposes
+ */
+ public Set<DiscoveryNode> getFaultDetectionNodes() {
+ return nodesFD.getNodes();
}
@Override
@@ -395,8 +397,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
joinThreadControl.markThreadAsDone(currentThread);
// we only starts nodesFD if we are master (it may be that we received a cluster state while pinging)
nodesFD.updateNodesAndPing(state); // start the nodes FD
- long count = clusterJoinsCounter.incrementAndGet();
- logger.trace("cluster joins counter set to [{}] (elected as master)", count);
}
@Override
@@ -467,7 +467,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
// first, make sure we can connect to the master
transportService.connectToNode(masterNode);
} catch (Exception e) {
- logger.warn("failed to connect to master [{}], retrying...", e, masterNode);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e);
return false;
}
int joinAttempt = 0; // we retry on illegal state if the master is not yet ready
@@ -487,7 +487,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}
} else {
if (logger.isTraceEnabled()) {
- logger.trace("failed to send join request to master [{}]", e, masterNode);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e);
} else {
logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e));
}
@@ -509,7 +509,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private final AllocationService allocationService;
private final ElectMasterService electMasterService;
private final BiFunction<ClusterState, String, ClusterState> rejoin;
- private final ESLogger logger;
+ private final Logger logger;
static class Task {
@@ -539,7 +539,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
final AllocationService allocationService,
final ElectMasterService electMasterService,
final BiFunction<ClusterState, String, ClusterState> rejoin,
- final ESLogger logger) {
+ final Logger logger) {
this.allocationService = allocationService;
this.electMasterService = electMasterService;
this.rejoin = rejoin;
@@ -570,9 +570,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) {
return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes"));
} else {
- final RoutingAllocation.Result routingResult =
- allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks));
- return resultBuilder.build(ClusterState.builder(remainingNodesClusterState).routingResult(routingResult).build());
+ return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)));
}
}
@@ -585,7 +583,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
public void onFailure(final String source, final Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
@Override
@@ -657,7 +655,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
@Override
@@ -677,7 +675,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
return;
}
- logger.info("master_left [{}], reason [{}]", cause, masterNode, reason);
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause);
clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@@ -706,7 +704,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
@Override
@@ -753,9 +751,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) {
// it's a fresh update from the master as we transition from not having a master to having one
logger.debug("got first state from fresh master [{}]", newClusterState.nodes().getMasterNodeId());
- long count = clusterJoinsCounter.incrementAndGet();
- logger.trace("updated cluster join cluster to [{}]", count);
-
return newClusterState;
}
@@ -791,13 +786,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
if (newClusterState != null) {
try {
publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.error("unexpected exception while failing [{}]", inner, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", source), inner);
}
}
}
@@ -821,7 +816,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
* If the first condition fails we reject the cluster state and throw an error.
* If the second condition fails we ignore the cluster state.
*/
- public static boolean shouldIgnoreOrRejectNewClusterState(ESLogger logger, ClusterState currentState, ClusterState newClusterState) {
+ public static boolean shouldIgnoreOrRejectNewClusterState(Logger logger, ClusterState currentState, ClusterState newClusterState) {
validateStateIsFromCurrentMaster(logger, currentState.nodes(), newClusterState);
// reject cluster states that are not new from the same master
@@ -845,7 +840,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
* This method checks for this and throws an exception if needed
*/
- public static void validateStateIsFromCurrentMaster(ESLogger logger, DiscoveryNodes currentNodes, ClusterState newClusterState) {
+ public static void validateStateIsFromCurrentMaster(Logger logger, DiscoveryNodes currentNodes, ClusterState newClusterState) {
if (currentNodes.getMasterNodeId() == null) {
return;
}
@@ -862,16 +857,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
} else if (nodeJoinController == null) {
throw new IllegalStateException("discovery module is not yet started");
} else {
- // The minimum supported version for a node joining a master:
- Version minimumNodeJoinVersion = localNode().getVersion().minimumCompatibilityVersion();
- // Sanity check: maybe we don't end up here, because serialization may have failed.
- if (node.getVersion().before(minimumNodeJoinVersion)) {
- callback.onFailure(
- new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
- );
- return;
- }
-
// try and connect to the node, if it fails, we can raise an exception back to the client...
transportService.connectToNode(node);
@@ -880,7 +865,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
try {
membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
} catch (Exception e) {
- logger.warn("failed to validate incoming join request from node [{}]", e, node);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e);
callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
return;
}
@@ -890,14 +875,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private DiscoveryNode findMaster() {
logger.trace("starting to ping");
- ZenPing.PingResponse[] fullPingResponses = pingService.pingAndWait(pingTimeout);
+ List<ZenPing.PingResponse> fullPingResponses = pingService.pingAndWait(pingTimeout).toList();
if (fullPingResponses == null) {
logger.trace("No full ping responses");
return null;
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder();
- if (fullPingResponses.length == 0) {
+ if (fullPingResponses.size() == 0) {
sb.append(" {none}");
} else {
for (ZenPing.PingResponse pingResponse : fullPingResponses) {
@@ -907,69 +892,57 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
logger.trace("full ping responses:{}", sb);
}
+ final DiscoveryNode localNode = clusterService.localNode();
+
+ // add our selves
+ assert fullPingResponses.stream().map(ZenPing.PingResponse::node)
+ .filter(n -> n.equals(localNode)).findAny().isPresent() == false;
+
+ fullPingResponses.add(new ZenPing.PingResponse(localNode, null, clusterService.state()));
+
// filter responses
final List<ZenPing.PingResponse> pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
- final DiscoveryNode localNode = clusterService.localNode();
- List<DiscoveryNode> pingMasters = new ArrayList<>();
+ List<DiscoveryNode> activeMasters = new ArrayList<>();
for (ZenPing.PingResponse pingResponse : pingResponses) {
- if (pingResponse.master() != null) {
- // We can't include the local node in pingMasters list, otherwise we may up electing ourselves without
- // any check / verifications from other nodes in ZenDiscover#innerJoinCluster()
- if (!localNode.equals(pingResponse.master())) {
- pingMasters.add(pingResponse.master());
- }
+ // We can't include the local node in the activeMasters list, otherwise we may end up electing ourselves without
+ // any checks / verification from other nodes in ZenDiscovery#innerJoinCluster()
+ if (pingResponse.master() != null && !localNode.equals(pingResponse.master())) {
+ activeMasters.add(pingResponse.master());
}
}
// nodes discovered during pinging
- Set<DiscoveryNode> activeNodes = new HashSet<>();
- // nodes discovered who has previously been part of the cluster and do not ping for the very first time
- Set<DiscoveryNode> joinedOnceActiveNodes = new HashSet<>();
- if (localNode.isMasterNode()) {
- activeNodes.add(localNode);
- long joinsCounter = clusterJoinsCounter.get();
- if (joinsCounter > 0) {
- logger.trace("adding local node to the list of active nodes that have previously joined the cluster (joins counter is [{}])", joinsCounter);
- joinedOnceActiveNodes.add(localNode);
- }
- }
+ List<ElectMasterService.MasterCandidate> masterCandidates = new ArrayList<>();
for (ZenPing.PingResponse pingResponse : pingResponses) {
- activeNodes.add(pingResponse.node());
- if (pingResponse.hasJoinedOnce()) {
- joinedOnceActiveNodes.add(pingResponse.node());
+ if (pingResponse.node().isMasterNode()) {
+ masterCandidates.add(new ElectMasterService.MasterCandidate(pingResponse.node(), pingResponse.getClusterStateVersion()));
}
}
- if (pingMasters.isEmpty()) {
- if (electMaster.hasEnoughMasterNodes(activeNodes)) {
- // we give preference to nodes who have previously already joined the cluster. Those will
- // have a cluster state in memory, including an up to date routing table (which is not persistent to disk
- // by the gateway)
- DiscoveryNode master = electMaster.electMaster(joinedOnceActiveNodes);
- if (master != null) {
- return master;
- }
- return electMaster.electMaster(activeNodes);
+ if (activeMasters.isEmpty()) {
+ if (electMaster.hasEnoughCandidates(masterCandidates)) {
+ final ElectMasterService.MasterCandidate winner = electMaster.electMaster(masterCandidates);
+ logger.trace("candidate {} won election", winner);
+ return winner.getNode();
} else {
// if we don't have enough master nodes, we bail, because there are not enough masters to elect from
- logger.trace("not enough master nodes [{}]", activeNodes);
+ logger.trace("not enough master nodes [{}]", masterCandidates);
return null;
}
} else {
-
- assert !pingMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
+ assert !activeMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
// let's tie-break between discovered nodes
- return electMaster.electMaster(pingMasters);
+ return electMaster.tieBreakActiveMasters(activeMasters);
}
}
- static List<ZenPing.PingResponse> filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, ESLogger logger) {
+ static List<ZenPing.PingResponse> filterPingResponses(List<ZenPing.PingResponse> fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) {
List<ZenPing.PingResponse> pingResponses;
if (masterElectionIgnoreNonMasters) {
- pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
+ pingResponses = fullPingResponses.stream().filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
} else {
- pingResponses = Arrays.asList(fullPingResponses);
+ pingResponses = fullPingResponses;
}
if (logger.isDebugEnabled()) {
@@ -1034,11 +1007,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
public void handleException(TransportException exp) {
- logger.warn("failed to send rejoin request to [{}]", exp, otherMaster);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
}
});
} catch (Exception e) {
- logger.warn("failed to send rejoin request to [{}]", e, otherMaster);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
}
return localClusterState;
}
@@ -1157,7 +1130,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
}
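
[editor's note] Putting the new findMaster() pieces together: if any remote node already reports an active master, this node joins it via a tie-break; otherwise an election runs over the master-eligible candidates and their cluster state versions. A condensed, self-contained sketch (types reduced to strings and longs, names hypothetical):

import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

class FindMasterSketch {
    // candidates: nodeId -> last published cluster state version (-1 = unrecovered)
    static String findMaster(List<String> activeMasters,
                             SortedMap<String, Long> candidates,
                             int minimumMasterNodes) {
        if (activeMasters.isEmpty()) {
            if (candidates.isEmpty()
                    || (minimumMasterNodes >= 1 && candidates.size() < minimumMasterNodes)) {
                return null; // not enough master-eligible candidates: bail and retry later
            }
            // highest cluster state version wins; ties broken by node id
            return candidates.entrySet().stream()
                    .min(Map.Entry.<String, Long>comparingByValue(Comparator.reverseOrder())
                            .thenComparing(Map.Entry.<String, Long>comparingByKey()))
                    .get().getKey();
        }
        // someone already follows a master: tie-break between the reported masters
        return activeMasters.stream().min(Comparator.naturalOrder()).get();
    }

    public static void main(String[] args) {
        SortedMap<String, Long> candidates = new TreeMap<>();
        candidates.put("node-a", 3L);
        candidates.put("node-b", 7L);
        System.out.println(findMaster(List.of(), candidates, 2)); // node-b
    }
}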
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
index bca13211c6..04aee9db3d 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
@@ -19,6 +19,8 @@
package org.elasticsearch.discovery.zen.fd;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@@ -34,7 +36,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
@@ -42,6 +43,7 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -166,7 +168,6 @@ public class MasterFaultDetection extends FaultDetection {
super.close();
stop("closing");
this.listeners.clear();
- transportService.removeHandler(MASTER_PING_ACTION_NAME);
}
@Override
@@ -283,8 +284,13 @@ public class MasterFaultDetection extends FaultDetection {
}
int retryCount = ++MasterFaultDetection.this.retryCount;
- logger.trace("[master] failed to ping [{}], retry [{}] out of [{}]", exp, masterNode, retryCount,
- pingRetryCount);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[master] failed to ping [{}], retry [{}] out of [{}]",
+ masterNode,
+ retryCount,
+ pingRetryCount),
+ exp);
if (retryCount >= pingRetryCount) {
logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout",
masterNode, pingRetryCount, pingRetryTimeout);
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java
index 4f17b14ff6..6361d3cde3 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java
@@ -19,6 +19,8 @@
package org.elasticsearch.discovery.zen.fd;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -28,7 +30,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
@@ -36,9 +37,12 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -90,6 +94,14 @@ public class NodesFaultDetection extends FaultDetection {
}
/**
+ * Gets the current set of nodes involved in node fault detection.
+ * NB: For testing purposes.
+ */
+ public Set<DiscoveryNode> getNodes() {
+ return Collections.unmodifiableSet(nodesFD.keySet());
+ }
+
+ /**
* make sure that nodes in clusterState are pinged. Any pinging to nodes which are not
* part of the cluster will be stopped
*/
@@ -127,7 +139,6 @@ public class NodesFaultDetection extends FaultDetection {
public void close() {
super.close();
stop();
- transportService.removeHandler(PING_ACTION_NAME);
}
@Override
@@ -166,7 +177,12 @@ public class NodesFaultDetection extends FaultDetection {
}
});
} catch (EsRejectedExecutionException ex) {
- logger.trace("[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", ex, node, reason);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down",
+ node,
+ reason),
+ ex);
}
}
@@ -231,7 +247,13 @@ public class NodesFaultDetection extends FaultDetection {
}
retryCount++;
- logger.trace("[node ] failed to ping [{}], retry [{}] out of [{}]", exp, node, retryCount, pingRetryCount);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[node ] failed to ping [{}], retry [{}] out of [{}]",
+ node,
+ retryCount,
+ pingRetryCount),
+ exp);
if (retryCount >= pingRetryCount) {
logger.debug("[node ] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", node,
pingRetryCount, pingRetryTimeout);
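
[editor's note] getNodes() hands tests a read-only view rather than a copy: Collections.unmodifiableSet wraps the live key set, so the view blocks mutation but tracks membership changes. A standalone illustration of that design choice:

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class UnmodifiableViewDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, Boolean> nodesFD = new ConcurrentHashMap<>();
        nodesFD.put("node-a", Boolean.TRUE);
        Set<String> view = Collections.unmodifiableSet(nodesFD.keySet());
        try {
            view.add("node-b"); // callers cannot mutate fault-detection state
        } catch (UnsupportedOperationException expected) {
            System.out.println("view is read-only");
        }
        nodesFD.put("node-b", Boolean.TRUE); // ...but the view sees live updates
        System.out.println(view.size()); // 2
    }
}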
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
index 961b8d7972..8740d12c5f 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
@@ -76,12 +76,6 @@ public class MembershipAction extends AbstractComponent {
transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler());
}
- public void close() {
- transportService.removeHandler(DISCOVERY_JOIN_ACTION_NAME);
- transportService.removeHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME);
- transportService.removeHandler(DISCOVERY_LEAVE_ACTION_NAME);
- }
-
public void sendLeaveRequest(DiscoveryNode masterNode, DiscoveryNode node) {
transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME);
}
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java
index 568bc3ec16..0bcc8b37d8 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java
@@ -19,6 +19,7 @@
package org.elasticsearch.discovery.zen.ping;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
/**
@@ -26,7 +27,7 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
*/
public interface PingContextProvider extends DiscoveryNodesProvider {
- /** return true if this node has previously joined the cluster at least once. False if this is first join */
- boolean nodeHasJoinedClusterOnce();
+ /** return the current cluster state of the node */
+ ClusterState clusterState();
}
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java
index 5a9f5f463e..b4bb61ad46 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java
@@ -20,30 +20,42 @@
package org.elasticsearch.discovery.zen.ping;
import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
+import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
+
public interface ZenPing extends LifecycleComponent {
void setPingContextProvider(PingContextProvider contextProvider);
void ping(PingListener listener, TimeValue timeout);
- public interface PingListener {
+ interface PingListener {
- void onPing(PingResponse[] pings);
+ /**
+ * called when pinging is done.
+ *
+ * @param pings the ping results; *must* not be null
+ */
+ void onPing(Collection<PingResponse> pings);
}
- public static class PingResponse implements Streamable {
+ class PingResponse implements Streamable {
public static final PingResponse[] EMPTY = new PingResponse[0];
@@ -59,29 +71,36 @@ public interface ZenPing extends LifecycleComponent {
private DiscoveryNode master;
- private boolean hasJoinedOnce;
+ private long clusterStateVersion;
private PingResponse() {
}
/**
- * @param node the node which this ping describes
- * @param master the current master of the node
- * @param clusterName the cluster name of the node
- * @param hasJoinedOnce true if the joined has successfully joined the cluster before
+ * @param node the node which this ping describes
+ * @param master the current master of the node
+ * @param clusterName the cluster name of the node
+ * @param clusterStateVersion the current cluster state version of that node
+ * ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION} for not recovered)
*/
- public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, boolean hasJoinedOnce) {
+ public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, long clusterStateVersion) {
this.id = idGenerator.incrementAndGet();
this.node = node;
this.master = master;
this.clusterName = clusterName;
- this.hasJoinedOnce = hasJoinedOnce;
+ this.clusterStateVersion = clusterStateVersion;
}
- /**
- * an always increasing unique identifier for this ping response.
- * lower values means older pings.
- */
+ public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterState state) {
+ this(node, master, state.getClusterName(),
+ state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) ?
+ ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION : state.version());
+ }
+
+ /**
+ * an always increasing unique identifier for this ping response.
+ * lower values mean older pings.
+ */
public long id() {
return this.id;
}
@@ -100,9 +119,11 @@ public interface ZenPing extends LifecycleComponent {
return master;
}
- /** true if the joined has successfully joined the cluster before */
- public boolean hasJoinedOnce() {
- return hasJoinedOnce;
+ /**
+ * the current cluster state version of that node ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION}
+ * for not recovered) */
+ public long getClusterStateVersion() {
+ return clusterStateVersion;
}
public static PingResponse readPingResponse(StreamInput in) throws IOException {
@@ -118,7 +139,7 @@ public interface ZenPing extends LifecycleComponent {
if (in.readBoolean()) {
master = new DiscoveryNode(in);
}
- this.hasJoinedOnce = in.readBoolean();
+ this.clusterStateVersion = in.readLong();
this.id = in.readLong();
}
@@ -132,13 +153,14 @@ public interface ZenPing extends LifecycleComponent {
out.writeBoolean(true);
master.writeTo(out);
}
- out.writeBoolean(hasJoinedOnce);
+ out.writeLong(clusterStateVersion);
out.writeLong(id);
}
@Override
public String toString() {
- return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], hasJoinedOnce [" + hasJoinedOnce + "], cluster_name[" + clusterName.value() + "]}";
+ return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], cluster_state_version [" + clusterStateVersion
+ + "], cluster_name[" + clusterName.value() + "]}";
}
}
@@ -146,7 +168,7 @@ public interface ZenPing extends LifecycleComponent {
/**
* a utility collection of pings where only the most recent ping is stored per node
*/
- public static class PingCollection {
+ class PingCollection {
Map<DiscoveryNode, PingResponse> pings;
@@ -171,15 +193,15 @@ public interface ZenPing extends LifecycleComponent {
}
/** adds multiple pings if newer than previous pings from the same node */
- public synchronized void addPings(PingResponse[] pings) {
+ public synchronized void addPings(Iterable<PingResponse> pings) {
for (PingResponse ping : pings) {
addPing(ping);
}
}
- /** serialize current pings to an array */
- public synchronized PingResponse[] toArray() {
- return pings.values().toArray(new PingResponse[pings.size()]);
+ /** serialize current pings to a list. It is guaranteed that the list contains one ping response per node */
+ public synchronized List<PingResponse> toList() {
+ return new ArrayList<>(pings.values());
}
/** the number of nodes for which there are known pings */
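
The new `clusterStateVersion` field and `PingCollection.toList()` work together during pinging: responses are folded into a collection that keeps only the freshest ping per node, then read back as a list. A hedged sketch of that flow against the types in this commit; `localNode`, `masterNode`, `clusterName`, and `clusterState` are assumed to be in scope:

    import java.util.Arrays;
    import java.util.List;

    ZenPing.PingCollection collection = new ZenPing.PingCollection();

    // the convenience constructor derives the version from the state, mapping a
    // not-yet-recovered state to ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION
    ZenPing.PingResponse first = new ZenPing.PingResponse(localNode, masterNode, clusterState);
    ZenPing.PingResponse second = new ZenPing.PingResponse(localNode, masterNode, clusterName, 43L);

    // only the most recent ping per node is kept (ids are monotonically increasing)
    collection.addPings(Arrays.asList(first, second));
    List<ZenPing.PingResponse> pings = collection.toList(); // exactly one entry for localNode
    long version = pings.get(0).getClusterStateVersion();
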
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java
index bd5855666a..3a2ddc10cf 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java
@@ -23,17 +23,15 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.AtomicBoolean;
-public class ZenPingService extends AbstractLifecycleComponent implements ZenPing {
+public class ZenPingService extends AbstractLifecycleComponent {
private List<ZenPing> zenPings = Collections.emptyList();
@@ -47,7 +45,6 @@ public class ZenPingService extends AbstractLifecycleComponent implements ZenPin
return this.zenPings;
}
- @Override
public void setPingContextProvider(PingContextProvider contextProvider) {
if (lifecycle.started()) {
throw new IllegalStateException("Can't set nodes provider when started");
@@ -78,60 +75,31 @@ public class ZenPingService extends AbstractLifecycleComponent implements ZenPin
}
}
- public PingResponse[] pingAndWait(TimeValue timeout) {
- final AtomicReference<PingResponse[]> response = new AtomicReference<>();
- final CountDownLatch latch = new CountDownLatch(1);
- ping(new PingListener() {
- @Override
- public void onPing(PingResponse[] pings) {
- response.set(pings);
- latch.countDown();
+ public ZenPing.PingCollection pingAndWait(TimeValue timeout) {
+ final ZenPing.PingCollection response = new ZenPing.PingCollection();
+ final CountDownLatch latch = new CountDownLatch(zenPings.size());
+ for (ZenPing zenPing : zenPings) {
+ final AtomicBoolean counted = new AtomicBoolean();
+ try {
+ zenPing.ping(pings -> {
+ response.addPings(pings);
+ if (counted.compareAndSet(false, true)) {
+ latch.countDown();
+ }
+ }, timeout);
+ } catch (Exception ex) {
+ logger.warn("Ping execution failed", ex);
+ if (counted.compareAndSet(false, true)) {
+ latch.countDown();
+ }
}
- }, timeout);
+ }
try {
latch.await();
- return response.get();
+ return response;
} catch (InterruptedException e) {
logger.trace("pingAndWait interrupted");
- return null;
- }
- }
-
- @Override
- public void ping(PingListener listener, TimeValue timeout) {
- List<? extends ZenPing> zenPings = this.zenPings;
- CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings);
- for (ZenPing zenPing : zenPings) {
- try {
- zenPing.ping(compoundPingListener, timeout);
- } catch (EsRejectedExecutionException ex) {
- logger.debug("Ping execution rejected", ex);
- compoundPingListener.onPing(null);
- }
- }
- }
-
- private static class CompoundPingListener implements PingListener {
-
- private final PingListener listener;
-
- private final AtomicInteger counter;
-
- private PingCollection responses = new PingCollection();
-
- private CompoundPingListener(PingListener listener, List<? extends ZenPing> zenPings) {
- this.listener = listener;
- this.counter = new AtomicInteger(zenPings.size());
- }
-
- @Override
- public void onPing(PingResponse[] pings) {
- if (pings != null) {
- responses.addPings(pings);
- }
- if (counter.decrementAndGet() == 0) {
- listener.onPing(responses.toArray());
- }
+ return response;
}
}
}
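
The rewritten pingAndWait replaces the CompoundPingListener with a per-ZenPing latch: every ping either reports results or fails, and an AtomicBoolean guards against double counting so the latch always reaches zero. The same fan-in pattern in isolation, using only the JDK (all names here are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class LatchFanInExample {
        interface Task {
            void run(Runnable onDone) throws Exception;
        }

        public static void main(String[] args) throws InterruptedException {
            List<Task> tasks = new ArrayList<>();
            tasks.add(onDone -> new Thread(onDone).start());              // completes asynchronously
            tasks.add(onDone -> { throw new RuntimeException("boom"); }); // fails synchronously

            CountDownLatch latch = new CountDownLatch(tasks.size());
            for (Task task : tasks) {
                AtomicBoolean counted = new AtomicBoolean();
                try {
                    task.run(() -> {
                        if (counted.compareAndSet(false, true)) {
                            latch.countDown();
                        }
                    });
                } catch (Exception e) {
                    // a failed task must still release its slot, but only once
                    if (counted.compareAndSet(false, true)) {
                        latch.countDown();
                    }
                }
            }
            latch.await(); // returns even though one task failed
            System.out.println("all tasks accounted for");
        }
    }
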
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
index e1466651da..637730c75f 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
@@ -20,6 +20,8 @@
package org.elasticsearch.discovery.zen.ping.unicast;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@@ -42,11 +44,10 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportChannel;
@@ -55,12 +56,14 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -158,18 +161,10 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
}
logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
-
List<DiscoveryNode> configuredTargetNodes = new ArrayList<>();
- for (String host : hosts) {
- try {
- TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
- for (TransportAddress address : addresses) {
- configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#",
- address, emptyMap(), emptySet(), getVersion().minimumCompatibilityVersion()));
- }
- } catch (Exception e) {
- throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
- }
+ for (final String host : hosts) {
+ configuredTargetNodes.addAll(resolveDiscoveryNodes(host, limitPortCounts, transportService,
+ () -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#"));
}
this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]);
@@ -181,6 +176,32 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
threadFactory, threadPool.getThreadContext());
}
+ /**
+ * Resolves a host to a list of discovery nodes. The host is resolved into a transport
+ * address (or a collection of addresses if the number of ports is greater than one) and
+ * the transport addresses are used to create discovery nodes.
+ *
+ * @param host the host to resolve
+ * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)
+ * @param transportService the transport service
+ * @param idGenerator the generator to supply unique ids for each discovery node
+ * @return a list of discovery nodes with resolved transport addresses
+ */
+ public static List<DiscoveryNode> resolveDiscoveryNodes(final String host, final int limitPortCounts,
+ final TransportService transportService, final Supplier<String> idGenerator) {
+ List<DiscoveryNode> discoveryNodes = new ArrayList<>();
+ try {
+ TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
+ for (TransportAddress address : addresses) {
+ discoveryNodes.add(new DiscoveryNode(idGenerator.get(), address, emptyMap(), emptySet(),
+ Version.CURRENT.minimumCompatibilityVersion()));
+ }
+ } catch (Exception e) {
+ throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
+ }
+ return discoveryNodes;
+ }
+
@Override
protected void doStart() {
}
@@ -191,7 +212,6 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
@Override
protected void doClose() {
- transportService.removeHandler(ACTION_NAME);
ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS);
try {
IOUtils.close(receivedResponses.values());
@@ -217,8 +237,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
temporalResponses.clear();
}
- public PingResponse[] pingAndWait(TimeValue duration) {
- final AtomicReference<PingResponse[]> response = new AtomicReference<>();
+ // test only
+ Collection<PingResponse> pingAndWait(TimeValue duration) {
+ final AtomicReference<Collection<PingResponse>> response = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
ping(pings -> {
response.set(pings);
@@ -254,7 +275,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
protected void doRun() throws Exception {
sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler);
sendPingsHandler.close();
- listener.onPing(sendPingsHandler.pingCollection().toArray());
+ listener.onPing(sendPingsHandler.pingCollection().toList());
for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) {
logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node);
transportService.disconnectFromNode(node);
@@ -413,13 +434,18 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
success = true;
} catch (ConnectTransportException e) {
// can't connect to the node - this is a more common path!
- logger.trace("[{}] failed to connect to {}", e, sendPingsHandler.id(), finalNodeToSend);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] failed to connect to {}", sendPingsHandler.id(), finalNodeToSend), e);
} catch (RemoteTransportException e) {
// something went wrong on the other side
- logger.debug("[{}] received a remote error as a response to ping {}", e,
- sendPingsHandler.id(), finalNodeToSend);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] received a remote error as a response to ping {}", sendPingsHandler.id(), finalNodeToSend), e);
} catch (Exception e) {
- logger.warn("[{}] failed send ping to {}", e, sendPingsHandler.id(), finalNodeToSend);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] failed send ping to {}", sendPingsHandler.id(), finalNodeToSend), e);
} finally {
if (!success) {
latch.countDown();
@@ -486,9 +512,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
latch.countDown();
if (exp instanceof ConnectTransportException) {
// ok, not connected...
- logger.trace("failed to connect to {}", exp, nodeToSend);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to connect to {}", nodeToSend), exp);
} else {
- logger.warn("failed to send ping to [{}]", exp, node);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp);
}
}
});
@@ -552,8 +578,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
}
private PingResponse createPingResponse(DiscoveryNodes discoNodes) {
- return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), clusterName,
- contextProvider.nodeHasJoinedClusterOnce());
+ return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), contextProvider.clusterState());
}
static class UnicastPingResponse extends TransportResponse {
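
The extracted resolveDiscoveryNodes helper makes host resolution reusable outside the constructor (it is public and static). A hedged usage sketch against the signature introduced here; the `transportService` variable and the host string are assumptions:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.elasticsearch.cluster.node.DiscoveryNode;

    AtomicInteger idCounter = new AtomicInteger();
    List<DiscoveryNode> seeds = UnicastZenPing.resolveDiscoveryNodes(
        "127.0.0.1",      // a unicast host entry, possibly without an explicit port
        5,                // resolve up to five ports for the entry
        transportService,
        () -> "#zen_unicast_" + idCounter.incrementAndGet()); // unique id per resolved node
    // an unresolvable host surfaces as IllegalArgumentException, as before
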
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java
index 3c4a2b84c5..e060f68833 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java
@@ -21,7 +21,7 @@ package org.elasticsearch.discovery.zen.publish;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -30,15 +30,11 @@ import java.io.IOException;
/**
* Class encapsulating stats about the PendingClusterStatesQueue
*/
-public class PendingClusterStateStats implements Streamable, ToXContent {
+public class PendingClusterStateStats implements Writeable, ToXContent {
- private int total;
- private int pending;
- private int committed;
-
- public PendingClusterStateStats() {
-
- }
+ private final int total;
+ private final int pending;
+ private final int committed;
public PendingClusterStateStats(int total, int pending, int committed) {
this.total = total;
@@ -46,6 +42,19 @@ public class PendingClusterStateStats implements Streamable, ToXContent {
this.committed = committed;
}
+ public PendingClusterStateStats(StreamInput in) throws IOException {
+ total = in.readVInt();
+ pending = in.readVInt();
+ committed = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(total);
+ out.writeVInt(pending);
+ out.writeVInt(committed);
+ }
+
public int getCommitted() {
return committed;
}
@@ -68,20 +77,6 @@ public class PendingClusterStateStats implements Streamable, ToXContent {
return builder;
}
- @Override
- public void readFrom(StreamInput in) throws IOException {
- total = in.readVInt();
- pending = in.readVInt();
- committed = in.readVInt();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(total);
- out.writeVInt(pending);
- out.writeVInt(committed);
- }
-
static final class Fields {
static final String QUEUE = "cluster_state_queue";
static final String TOTAL = "total";
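
This file follows the project-wide migration from Streamable (mutable fields plus readFrom) to Writeable (final fields plus a StreamInput constructor), which is why the fields can become final. The shape of the pattern in miniature, using a hypothetical stats class with a single counter (not part of this commit):

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    // hypothetical example illustrating the Streamable -> Writeable migration
    public class SimpleStats implements Writeable {
        private final int total; // can be final: deserialization happens in the constructor

        public SimpleStats(int total) {
            this.total = total;
        }

        public SimpleStats(StreamInput in) throws IOException {
            total = in.readVInt(); // replaces the old readFrom(StreamInput)
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(total);
        }

        public int getTotal() {
            return total;
        }
    }
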
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java
index 24b093627b..01fb96b713 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java
@@ -18,10 +18,10 @@
*/
package org.elasticsearch.discovery.zen.publish;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.logging.ESLogger;
import java.util.ArrayList;
import java.util.Locale;
@@ -55,10 +55,10 @@ public class PendingClusterStatesQueue {
}
final ArrayList<ClusterStateContext> pendingStates = new ArrayList<>();
- final ESLogger logger;
+ final Logger logger;
final int maxQueueSize;
- public PendingClusterStatesQueue(ESLogger logger, int maxQueueSize) {
+ public PendingClusterStatesQueue(Logger logger, int maxQueueSize) {
this.logger = logger;
this.maxQueueSize = maxQueueSize;
}
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java
index 10f874923d..870e34cc1f 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.discovery.zen.publish;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -106,11 +107,6 @@ public class PublishClusterStateAction extends AbstractComponent {
transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, new CommitClusterStateRequestHandler());
}
- public void close() {
- transportService.removeHandler(SEND_ACTION_NAME);
- transportService.removeHandler(COMMIT_ACTION_NAME);
- }
-
public PendingClusterStatesQueue pendingStatesQueue() {
return pendingStatesQueue;
}
@@ -244,7 +240,8 @@ public class PublishClusterStateAction extends AbstractComponent {
bytes = serializeFullClusterState(clusterState, node.getVersion());
serializedStates.put(node.getVersion(), bytes);
} catch (Exception e) {
- logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e);
sendingController.onNodeSendFailed(node, e);
return;
}
@@ -290,13 +287,14 @@ public class PublishClusterStateAction extends AbstractComponent {
logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage());
sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController);
} else {
- logger.debug("failed to send cluster state to {}", exp, node);
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "failed to send cluster state to {}", node), exp);
sendingController.onNodeSendFailed(node, exp);
}
}
});
} catch (Exception e) {
- logger.warn("error sending cluster state to {}", e, node);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("error sending cluster state to {}", node), e);
sendingController.onNodeSendFailed(node, e);
}
}
@@ -322,12 +320,12 @@ public class PublishClusterStateAction extends AbstractComponent {
@Override
public void handleException(TransportException exp) {
- logger.debug("failed to commit cluster state (uuid [{}], version [{}]) to {}", exp, clusterState.stateUUID(), clusterState.version(), node);
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "failed to commit cluster state (uuid [{}], version [{}]) to {}",
+ clusterState.stateUUID(), clusterState.version(), node), exp);
sendingController.getPublishResponseHandler().onFailure(node, exp);
}
});
} catch (Exception t) {
- logger.warn("error sending cluster state commit (uuid [{}], version [{}]) to {}", t, clusterState.stateUUID(), clusterState.version(), node);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "error sending cluster state commit (uuid [{}], version [{}]) to {}",
+ clusterState.stateUUID(), clusterState.version(), node), t);
sendingController.getPublishResponseHandler().onFailure(node, t);
}
}
@@ -626,7 +624,7 @@ public class PublishClusterStateAction extends AbstractComponent {
if (committedOrFailed()) {
return committed == false;
}
- logger.trace("failed to commit version [{}]. {}", reason, clusterState.version(), details);
+ logger.trace(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "failed to commit version [{}]. {}", clusterState.version(), details), reason);
committed = false;
committedOrFailedLatch.countDown();
return true;
diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index 4ddf0e38b7..f3e1f2fb24 100644
--- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -19,6 +19,9 @@
package org.elasticsearch.env;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
@@ -36,7 +39,6 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -83,7 +85,7 @@ import static java.util.Collections.unmodifiableSet;
*/
public final class NodeEnvironment implements Closeable {
- private final ESLogger logger;
+ private final Logger logger;
public static class NodePath {
/* ${data.paths}/nodes/{node.id} */
@@ -196,7 +198,7 @@ public final class NodeEnvironment implements Closeable {
boolean success = false;
// trace logger to debug issues before the default node name is derived from the node id
- ESLogger startupTraceLogger = Loggers.getLogger(getClass(), settings);
+ Logger startupTraceLogger = Loggers.getLogger(getClass(), settings);
try {
sharedDataPath = environment.sharedDataFile();
@@ -207,13 +209,6 @@ public final class NodeEnvironment implements Closeable {
for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
Path dataDir = environment.dataFiles()[dirIndex];
- // TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory
- if (readFromDataPathWithClusterName(dataDirWithClusterName)) {
- DeprecationLogger deprecationLogger = new DeprecationLogger(startupTraceLogger);
- deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " +
- "Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir);
- dataDir = dataDirWithClusterName;
- }
Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
Files.createDirectories(dir);
@@ -231,7 +226,8 @@ public final class NodeEnvironment implements Closeable {
}
} catch (IOException e) {
- startupTraceLogger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
+ startupTraceLogger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e);
lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
// release all the ones that were obtained up until now
releaseAndNullLocks(locks);
@@ -286,25 +282,6 @@ public final class NodeEnvironment implements Closeable {
}
}
- // Visible for testing
- /** Returns true if data should be read from the data path that includes the cluster name (ie, it has data in it) */
- static boolean readFromDataPathWithClusterName(Path dataPathWithClusterName) throws IOException {
- if (Files.exists(dataPathWithClusterName) == false || // If it doesn't exist
- Files.isDirectory(dataPathWithClusterName) == false || // Or isn't a directory
- dirEmpty(dataPathWithClusterName)) { // Or if it's empty
- // No need to read from cluster-name folder!
- return false;
- }
- // The "nodes" directory inside of the cluster name
- Path nodesPath = dataPathWithClusterName.resolve(NODES_FOLDER);
- if (Files.isDirectory(nodesPath)) {
- // The cluster has data in the "nodes" so we should read from the cluster-named folder for now
- return true;
- }
- // Hey the nodes directory didn't exist, so we can safely use whatever directory we feel appropriate
- return false;
- }
-
private static void releaseAndNullLocks(Lock[] locks) {
for (int i = 0; i < locks.length; i++) {
if (locks[i] != null) {
@@ -392,7 +369,7 @@ public final class NodeEnvironment implements Closeable {
* scans the node paths and loads existing metaData file. If not found a new meta data will be generated
* and persisted into the nodePaths
*/
- private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, ESLogger logger,
+ private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger logger,
NodePath... nodePaths) throws IOException {
final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new);
NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, paths);
@@ -884,7 +861,7 @@ public final class NodeEnvironment implements Closeable {
logger.trace("releasing lock [{}]", lock);
lock.close();
} catch (IOException e) {
- logger.trace("failed to release lock [{}]", e, lock);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java b/core/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java
index 1c113e1abe..d1a8ce3b6d 100644
--- a/core/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java
+++ b/core/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java
@@ -39,7 +39,7 @@ public class ShardLockObtainFailedException extends Exception {
@Override
public String getMessage() {
- StringBuffer sb = new StringBuffer();
+ StringBuilder sb = new StringBuilder();
sb.append(shardId.toString());
sb.append(": ");
sb.append(super.getMessage());
diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
index dc7194b949..42c40034b1 100644
--- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
+++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
@@ -19,6 +19,9 @@
package org.elasticsearch.gateway;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
@@ -30,7 +33,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
@@ -63,7 +65,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
void list(ShardId shardId, DiscoveryNode[] nodes, ActionListener<NodesResponse> listener);
}
- protected final ESLogger logger;
+ protected final Logger logger;
protected final String type;
private final ShardId shardId;
private final Lister<BaseNodesResponse<T>, T> action;
@@ -72,7 +74,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
private boolean closed;
@SuppressWarnings("unchecked")
- protected AsyncShardFetch(ESLogger logger, String type, ShardId shardId, Lister<? extends BaseNodesResponse<T>, T> action) {
+ protected AsyncShardFetch(Logger logger, String type, ShardId shardId, Lister<? extends BaseNodesResponse<T>, T> action) {
this.logger = logger;
this.type = type;
this.shardId = shardId;
@@ -200,7 +202,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
if (unwrappedCause instanceof EsRejectedExecutionException || unwrappedCause instanceof ReceiveTimeoutTransportException || unwrappedCause instanceof ElasticsearchTimeoutException) {
nodeEntry.restartFetching();
} else {
- logger.warn("{}: failed to list shard for {} on node [{}]", failure, shardId, type, failure.nodeId());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "{}: failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure);
nodeEntry.doneFetching(failure.getCause());
}
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java
new file mode 100644
index 0000000000..3874d54f45
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An abstract class that implements basic functionality for allocating
+ * shards to nodes based on shard copies that already exist in the cluster.
+ *
+ * Individual implementations of this class are responsible for providing
+ * the logic to determine to which nodes (if any) those shards are allocated.
+ */
+public abstract class BaseGatewayShardAllocator extends AbstractComponent {
+
+ public BaseGatewayShardAllocator(Settings settings) {
+ super(settings);
+ }
+
+ /**
+ * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist.
+ * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)}
+ * to make decisions on assigning shards to nodes.
+ *
+ * @param allocation the allocation state container object
+ */
+ public void allocateUnassigned(RoutingAllocation allocation) {
+ final RoutingNodes routingNodes = allocation.routingNodes();
+ final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
+ while (unassignedIterator.hasNext()) {
+ final ShardRouting shard = unassignedIterator.next();
+ final UnassignedShardDecision unassignedShardDecision = makeAllocationDecision(shard, allocation, logger);
+
+ if (unassignedShardDecision.isDecisionTaken() == false) {
+ // no decision was taken by this allocator
+ continue;
+ }
+
+ if (unassignedShardDecision.getFinalDecisionSafe().type() == Decision.Type.YES) {
+ unassignedIterator.initialize(unassignedShardDecision.getAssignedNodeId(),
+ unassignedShardDecision.getAllocationId(),
+ shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE :
+ allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
+ allocation.changes());
+ } else {
+ unassignedIterator.removeAndIgnore(unassignedShardDecision.getAllocationStatus(), allocation.changes());
+ }
+ }
+ }
+
+ /**
+ * Make a decision on the allocation of an unassigned shard. This method is used by
+ * {@link #allocateUnassigned(RoutingAllocation)} to make decisions about whether or not
+ * the shard can be allocated by this allocator and if so, to which node it will be allocated.
+ *
+ * @param unassignedShard the unassigned shard to allocate
+ * @param allocation the current routing state
+ * @param logger the logger
+ * @return an {@link UnassignedShardDecision} with the final decision of whether to allocate and details of the decision
+ */
+ public abstract UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard,
+ RoutingAllocation allocation,
+ Logger logger);
+}
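
Concrete allocators now only implement makeAllocationDecision; the shared allocateUnassigned loop applies whatever decision comes back. A minimal hypothetical subclass to show the contract (the "always decline" behavior is illustrative, not how PrimaryShardAllocator or ReplicaShardAllocator actually decide):

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
    import org.elasticsearch.common.settings.Settings;

    // illustrative subclass, not part of this commit; it simply declines every shard
    public class NoopGatewayShardAllocator extends BaseGatewayShardAllocator {

        public NoopGatewayShardAllocator(Settings settings) {
            super(settings);
        }

        @Override
        public UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard,
                                                              RoutingAllocation allocation,
                                                              Logger logger) {
            // DECISION_NOT_TAKEN tells allocateUnassigned() to skip this shard,
            // leaving it for another allocator in the chain
            return UnassignedShardDecision.DECISION_NOT_TAKEN;
        }
    }
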
diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java
index ee2abc17ab..3030632a76 100644
--- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java
+++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java
@@ -21,6 +21,7 @@ package org.elasticsearch.gateway;
import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
@@ -39,9 +40,6 @@ import org.elasticsearch.indices.IndicesService;
import java.util.Arrays;
import java.util.function.Supplier;
-/**
- *
- */
public class Gateway extends AbstractComponent implements ClusterStateListener {
private final ClusterService clusterService;
@@ -138,7 +136,10 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData, electedIndexMetaData);
}
} catch (Exception e) {
- logger.warn("recovering index {} failed - recovering as closed", e, electedIndexMetaData.getIndex());
+ final Index electedIndex = electedIndexMetaData.getIndex();
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e);
electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build();
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
index a9fbe0ac82..450255575d 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
@@ -19,6 +19,7 @@
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -27,19 +28,18 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
+import java.util.List;
import java.util.concurrent.ConcurrentMap;
/**
@@ -115,21 +115,28 @@ public class GatewayAllocator extends AbstractComponent {
return count;
}
- public void applyStartedShards(StartedRerouteAllocation allocation) {
- for (ShardRouting shard : allocation.startedShards()) {
- Releasables.close(asyncFetchStarted.remove(shard.shardId()));
- Releasables.close(asyncFetchStore.remove(shard.shardId()));
+ public void applyStartedShards(final RoutingAllocation allocation, final List<ShardRouting> startedShards) {
+ for (ShardRouting startedShard : startedShards) {
+ Releasables.close(asyncFetchStarted.remove(startedShard.shardId()));
+ Releasables.close(asyncFetchStore.remove(startedShard.shardId()));
}
}
- public void applyFailedShards(FailedRerouteAllocation allocation) {
- for (FailedRerouteAllocation.FailedShard shard : allocation.failedShards()) {
- Releasables.close(asyncFetchStarted.remove(shard.routingEntry.shardId()));
- Releasables.close(asyncFetchStore.remove(shard.routingEntry.shardId()));
+ public void applyFailedShards(final RoutingAllocation allocation, final List<FailedShard> failedShards) {
+ for (FailedShard failedShard : failedShards) {
+ Releasables.close(asyncFetchStarted.remove(failedShard.getRoutingEntry().shardId()));
+ Releasables.close(asyncFetchStore.remove(failedShard.getRoutingEntry().shardId()));
}
}
public void allocateUnassigned(final RoutingAllocation allocation) {
+ innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator);
+ }
+
+ // allow for testing infra to change shard allocators implementation
+ protected static void innerAllocatedUnassigned(RoutingAllocation allocation,
+ PrimaryShardAllocator primaryShardAllocator,
+ ReplicaShardAllocator replicaShardAllocator) {
RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
unassigned.sort(PriorityComparator.getAllocationComparator(allocation)); // sort for priority ordering
@@ -140,7 +147,7 @@ public class GatewayAllocator extends AbstractComponent {
class InternalAsyncFetch<T extends BaseNodeResponse> extends AsyncShardFetch<T> {
- public InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, Lister<? extends BaseNodesResponse<T>, T> action) {
+ public InternalAsyncFetch(Logger logger, String type, ShardId shardId, Lister<? extends BaseNodesResponse<T>, T> action) {
super(logger, type, shardId, action);
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java b/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java
index f736c07027..8169062a70 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java
@@ -30,7 +30,6 @@ public class GatewayModule extends AbstractModule {
@Override
protected void configure() {
- bind(MetaStateService.class).asEagerSingleton();
bind(DanglingIndicesState.class).asEagerSingleton();
bind(GatewayService.class).asEagerSingleton();
bind(TransportNodesListGatewayMetaState.class).asEagerSingleton();
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
index 3282a8f2a4..856574748d 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
@@ -20,6 +20,8 @@
package org.elasticsearch.gateway;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
@@ -32,7 +34,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -280,16 +281,13 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
routingTableBuilder.version(0);
// now, reroute
- RoutingAllocation.Result routingResult = allocationService.reroute(
- ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
- "state recovered");
-
- return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ updatedState = ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build();
+ return allocationService.reroute(updatedState, "state recovered");
}
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
GatewayRecoveryListener.this.onFailure("failed to update cluster state");
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
index ee987b8665..24562b5216 100644
--- a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
+++ b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
@@ -19,6 +19,8 @@
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -28,7 +30,6 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
@@ -148,8 +149,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
upgradedIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
} catch (Exception ex) {
// upgrade failed - adding index as closed
- logger.warn("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", ex,
- indexMetaData.getIndex(), request.fromNode);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed",
+ indexMetaData.getIndex(), request.fromNode), ex);
upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build();
}
metaData.put(upgradedIndexMetaData, false);
@@ -168,15 +168,13 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
ClusterState updatedState = ClusterState.builder(currentState).metaData(metaData).blocks(blocks).routingTable(routingTable).build();
// now, reroute
- RoutingAllocation.Result routingResult = allocationService.reroute(
+ return allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTable).build(), "dangling indices allocated");
-
- return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure during [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
try {
channel.sendResponse(e);
} catch (Exception inner) {
diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java
index cc6a48b855..71c3190e2e 100644
--- a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java
+++ b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java
@@ -18,6 +18,9 @@
*/
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
@@ -30,7 +33,6 @@ import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.store.IndexOutputOutputStream;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -254,11 +256,11 @@ public abstract class MetaDataStateFormat<T> {
* the states version from one or more data directories and if none of the latest states can be loaded an exception
* is thrown to prevent accidentally loading a previous state and silently omitting the latest state.
*
- * @param logger an elasticsearch logger instance
+ * @param logger a logger instance
* @param dataLocations the data-locations to try.
* @return the latest state or <code>null</code> if no state was found.
*/
- public T loadLatestState(ESLogger logger, Path... dataLocations) throws IOException {
+ public T loadLatestState(Logger logger, Path... dataLocations) throws IOException {
List<PathAndStateId> files = new ArrayList<>();
long maxStateId = -1;
boolean maxStateIdIsLegacy = true;
@@ -322,7 +324,9 @@ public abstract class MetaDataStateFormat<T> {
return state;
} catch (Exception e) {
exceptions.add(new IOException("failed to read " + pathAndStateId.toString(), e));
- logger.debug("{}: failed to read [{}], ignoring...", e, pathAndStateId.file.toAbsolutePath(), prefix);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e);
}
}
// if we reach this something went wrong
diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java
index 9a36df4367..e58a48d41b 100644
--- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java
+++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java
@@ -19,11 +19,12 @@
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
@@ -40,7 +41,6 @@ public class MetaStateService extends AbstractComponent {
private final NodeEnvironment nodeEnv;
- @Inject
public MetaStateService(Settings settings, NodeEnvironment nodeEnv) {
super(settings);
this.nodeEnv = nodeEnv;
@@ -128,7 +128,7 @@ public class MetaStateService extends AbstractComponent {
IndexMetaData.FORMAT.write(indexMetaData,
nodeEnv.indexPaths(indexMetaData.getIndex()));
} catch (Exception ex) {
- logger.warn("[{}]: failed to write index state", ex, index);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}]: failed to write index state", index), ex);
throw new IOException("failed to write state for [" + index + "]", ex);
}
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
index 600e2c5e40..25ae3b7cce 100644
--- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
@@ -19,31 +19,36 @@
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.AsyncShardFetch.FetchResult;
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
import org.elasticsearch.index.shard.ShardStateMetaData;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
+import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -61,7 +66,7 @@ import java.util.stream.Collectors;
* nor does it allocate primaries when a primary shard failed and there is a valid replica
* copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}.
*/
-public abstract class PrimaryShardAllocator extends AbstractComponent {
+public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
private static final Function<String, String> INITIAL_SHARDS_PARSER = (value) -> {
switch (value) {
@@ -93,119 +98,173 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
logger.debug("using initial_shards [{}]", NODE_INITIAL_SHARDS_SETTING.get(settings));
}
- public void allocateUnassigned(RoutingAllocation allocation) {
- final RoutingNodes routingNodes = allocation.routingNodes();
- final MetaData metaData = allocation.metaData();
-
- final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
- while (unassignedIterator.hasNext()) {
- final ShardRouting shard = unassignedIterator.next();
-
- if (shard.primary() == false) {
- continue;
- }
+ /**
+ * Is the allocator responsible for allocating the given {@link ShardRouting}?
+ */
+ private static boolean isResponsibleFor(final ShardRouting shard) {
+ return shard.primary() // must be primary
+ && shard.unassigned() // must be unassigned
+ // only handle either an existing store or a snapshot recovery
+ && (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE
+ || shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT);
+ }
- final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
- // don't go wild here and create a new IndexSetting object for every shard this could cause a lot of garbage
- // on cluster restart if we allocate a boat load of shards
- if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
- // when we create a fresh index
- continue;
- }
+ @Override
+ public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
+ final RoutingAllocation allocation,
+ final Logger logger) {
+ if (isResponsibleFor(unassignedShard) == false) {
+ // this allocator is not responsible for allocating this shard
+ return UnassignedShardDecision.DECISION_NOT_TAKEN;
+ }
- final AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
- if (shardState.hasData() == false) {
- logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
- allocation.setHasPendingAsyncFetch();
- unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
- continue;
- }
+ final boolean explain = allocation.debugDecision();
+ final FetchResult<NodeGatewayStartedShards> shardState = fetchData(unassignedShard, allocation);
+ if (shardState.hasData() == false) {
+ allocation.setHasPendingAsyncFetch();
+ return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
+ "still fetching shard state from the nodes in the cluster");
+ }
- final Set<String> lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id());
- final boolean snapshotRestore = shard.restoreSource() != null;
- final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);
-
- final NodeShardsResult nodeShardsResult;
- final boolean enoughAllocationsFound;
-
- if (lastActiveAllocationIds.isEmpty()) {
- assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocated a primary with an empty allocation id set, but index is new";
- // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
- // fall back to old version-based allocation mode
- // Note that once the shard has been active, lastActiveAllocationIds will be non-empty
- nodeShardsResult = buildVersionBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
- if (snapshotRestore || recoverOnAnyNode) {
- enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
- } else {
- enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
- }
- logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard);
+ // don't create a new IndexSetting object for every shard as this could cause a lot of garbage
+ // on cluster restart if we allocate a boat load of shards
+ final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index());
+ final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id());
+ final boolean snapshotRestore = unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
+ final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);
+
+ final NodeShardsResult nodeShardsResult;
+ final boolean enoughAllocationsFound;
+
+ if (inSyncAllocationIds.isEmpty()) {
+ assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) :
+ "trying to allocate a primary with an empty in sync allocation id set, but index is new. index: "
+ + indexMetaData.getIndex();
+ // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
+ // fall back to old version-based allocation mode
+ // Note that once the shard has been active, inSyncAllocationIds will be non-empty
+ nodeShardsResult = buildVersionBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
+ allocation.getIgnoreNodes(unassignedShard.shardId()), shardState, logger);
+ if (snapshotRestore || recoverOnAnyNode) {
+ enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
} else {
- assert lastActiveAllocationIds.isEmpty() == false;
- // use allocation ids to select nodes
- nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode,
- allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState);
- enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
- logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, lastActiveAllocationIds);
+ enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
+ logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", unassignedShard.index(),
+ unassignedShard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, unassignedShard);
+ } else {
+ assert inSyncAllocationIds.isEmpty() == false;
+ // use allocation ids to select nodes
+ nodeShardsResult = buildAllocationIdBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
+ allocation.getIgnoreNodes(unassignedShard.shardId()), inSyncAllocationIds, shardState, logger);
+ enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
+ logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(),
+ unassignedShard.id(), nodeShardsResult.orderedAllocationCandidates.size(), unassignedShard, inSyncAllocationIds);
+ }
- if (enoughAllocationsFound == false){
- if (snapshotRestore) {
- // let BalancedShardsAllocator take care of allocating this shard
- logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
- } else if (recoverOnAnyNode) {
- // let BalancedShardsAllocator take care of allocating this shard
- logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());
- } else {
- // we can't really allocate, so ignore it and continue
- unassignedIterator.removeAndIgnore(AllocationStatus.NO_VALID_SHARD_COPY, allocation.changes());
- logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodeShardsResult.allocationsFound);
- }
- continue;
+ if (enoughAllocationsFound == false) {
+ if (snapshotRestore) {
+ // let BalancedShardsAllocator take care of allocating this shard
+ logger.debug("[{}][{}]: missing local data, will restore from [{}]",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource());
+ return UnassignedShardDecision.DECISION_NOT_TAKEN;
+ } else if (recoverOnAnyNode) {
+ // let BalancedShardsAllocator take care of allocating this shard
+ logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id());
+ return UnassignedShardDecision.DECISION_NOT_TAKEN;
+ } else {
+ // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary.
+ // We could just be waiting for the node that holds the primary to start back up, in which case the allocation for
+ // this shard will be picked up when the node joins and we do another allocation reroute
+ logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]",
+ unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound);
+ return UnassignedShardDecision.noDecision(AllocationStatus.NO_VALID_SHARD_COPY,
+ "shard was previously allocated, but no valid shard copy could be found amongst the current nodes in the cluster");
}
+ }
- final NodesToAllocate nodesToAllocate = buildNodesToAllocate(
- allocation, nodeShardsResult.orderedAllocationCandidates, shard, false
+ final NodesToAllocate nodesToAllocate = buildNodesToAllocate(
+ allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, false
+ );
+ if (nodesToAllocate.yesNodeShards.isEmpty() == false) {
+ DecidedNode decidedNode = nodesToAllocate.yesNodeShards.get(0);
+ logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard, decidedNode.nodeShardState.getNode());
+ final String nodeId = decidedNode.nodeShardState.getNode().getId();
+ return UnassignedShardDecision.yesDecision(
+ "the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]",
+ nodeId, decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain));
+ } else if (nodesToAllocate.throttleNodeShards.isEmpty() && nodesToAllocate.noNodeShards.isEmpty() == false) {
+ // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard
+ // can be force-allocated to one of the nodes.
+ final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate(
+ allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, true
);
- if (nodesToAllocate.yesNodeShards.isEmpty() == false) {
- NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0);
- logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
- unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes());
- } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
- // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard
- // can be force-allocated to one of the nodes.
- final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate(
- allocation, nodeShardsResult.orderedAllocationCandidates, shard, true
- );
- if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) {
- NodeGatewayStartedShards nodeShardState = nodesToForceAllocate.yesNodeShards.get(0);
- logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation",
- shard.index(), shard.id(), shard, nodeShardState.getNode());
- unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(),
- ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes());
- } else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) {
- logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation",
- shard.index(), shard.id(), shard, nodesToForceAllocate.throttleNodeShards);
- unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes());
- } else {
- logger.debug("[{}][{}]: forced primary allocation denied [{}]", shard.index(), shard.id(), shard);
- unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_NO, allocation.changes());
- }
+ if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) {
+ final DecidedNode decidedNode = nodesToForceAllocate.yesNodeShards.get(0);
+ final NodeGatewayStartedShards nodeShardState = decidedNode.nodeShardState;
+ logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeShardState.getNode());
+ final String nodeId = nodeShardState.getNode().getId();
+ return UnassignedShardDecision.yesDecision(
+ "allocating the primary shard to node [" + nodeId+ "], which has a complete copy of the shard data",
+ nodeId,
+ nodeShardState.allocationId(),
+ buildNodeDecisions(nodesToForceAllocate, explain));
+ } else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) {
+ logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToForceAllocate.throttleNodeShards);
+ return UnassignedShardDecision.throttleDecision(
+ "allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries",
+ buildNodeDecisions(nodesToForceAllocate, explain));
} else {
- // we are throttling this, but we have enough to allocate to this node, ignore it for now
- logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards);
- unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes());
+ logger.debug("[{}][{}]: forced primary allocation denied [{}]",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard);
+ return UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO,
+ "all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted",
+ buildNodeDecisions(nodesToForceAllocate, explain));
}
+ } else {
+ // we are throttling this, since we are allowed to allocate to this node but there are already enough
+ // allocations taking place on it, so ignore the shard for now
+ logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToAllocate.throttleNodeShards);
+ return UnassignedShardDecision.throttleDecision(
+ "allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries",
+ buildNodeDecisions(nodesToAllocate, explain));
}
}
/**
+ * Builds a map of nodes to the corresponding allocation decisions for those nodes.
+ */
+ private static Map<String, Decision> buildNodeDecisions(NodesToAllocate nodesToAllocate, boolean explain) {
+ if (explain == false) {
+ // not in explain mode, no need to return node level decisions
+ return null;
+ }
+ Map<String, Decision> nodeDecisions = new LinkedHashMap<>();
+ for (final DecidedNode decidedNode : nodesToAllocate.yesNodeShards) {
+ nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
+ }
+ for (final DecidedNode decidedNode : nodesToAllocate.throttleNodeShards) {
+ nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
+ }
+ for (final DecidedNode decidedNode : nodesToAllocate.noNodeShards) {
+ nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
+ }
+ return nodeDecisions;
+ }
+
+ /**
* Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching
- * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
+ * inSyncAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
* entries with matching allocation id are always at the front of the list.
*/
- protected NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
- Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
+ protected static NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard,
+ Set<String> ignoreNodes, Set<String> inSyncAllocationIds,
+ FetchResult<NodeGatewayStartedShards> shardState,
+ Logger logger) {
LinkedList<NodeGatewayStartedShards> matchingNodeShardStates = new LinkedList<>();
LinkedList<NodeGatewayStartedShards> nonMatchingNodeShardStates = new LinkedList<>();
int numberOfAllocationsFound = 0;
@@ -227,13 +286,14 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
logger.trace("[{}] on node [{}] has no allocation id, out-dated shard (shard state version: [{}])", shard, nodeShardState.getNode(), nodeShardState.legacyVersion());
}
} else {
- logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId);
+ final String finalAllocationId = allocationId;
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
allocationId = null;
}
if (allocationId != null) {
numberOfAllocationsFound++;
- if (lastActiveAllocationIds.contains(allocationId)) {
+ if (inSyncAllocationIds.contains(allocationId)) {
if (nodeShardState.primary()) {
matchingNodeShardStates.addFirst(nodeShardState);
} else {
@@ -297,9 +357,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
List<NodeGatewayStartedShards> nodeShardStates,
ShardRouting shardRouting,
boolean forceAllocate) {
- List<NodeGatewayStartedShards> yesNodeShards = new ArrayList<>();
- List<NodeGatewayStartedShards> throttledNodeShards = new ArrayList<>();
- List<NodeGatewayStartedShards> noNodeShards = new ArrayList<>();
+ List<DecidedNode> yesNodeShards = new ArrayList<>();
+ List<DecidedNode> throttledNodeShards = new ArrayList<>();
+ List<DecidedNode> noNodeShards = new ArrayList<>();
for (NodeGatewayStartedShards nodeShardState : nodeShardStates) {
RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId());
if (node == null) {
@@ -308,12 +368,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
Decision decision = forceAllocate ? allocation.deciders().canForceAllocatePrimary(shardRouting, node, allocation) :
allocation.deciders().canAllocate(shardRouting, node, allocation);
- if (decision.type() == Decision.Type.THROTTLE) {
- throttledNodeShards.add(nodeShardState);
- } else if (decision.type() == Decision.Type.NO) {
- noNodeShards.add(nodeShardState);
+ DecidedNode decidedNode = new DecidedNode(nodeShardState, decision);
+ if (decision.type() == Type.THROTTLE) {
+ throttledNodeShards.add(decidedNode);
+ } else if (decision.type() == Type.NO) {
+ noNodeShards.add(decidedNode);
} else {
- yesNodeShards.add(nodeShardState);
+ yesNodeShards.add(decidedNode);
}
}
return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards));
@@ -323,8 +384,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* Builds a list of previously started shards. If matchAnyShard is set to false, only shards with the highest shard version are added to
* the list. Otherwise, any existing shard is added to the list, but entries with highest version are always at the front of the list.
*/
- NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
- AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
+ static NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
+ FetchResult<NodeGatewayStartedShards> shardState, Logger logger) {
final List<NodeGatewayStartedShards> allocationCandidates = new ArrayList<>();
int numberOfAllocationsFound = 0;
long highestVersion = ShardStateMetaData.NO_VERSION;
@@ -351,8 +412,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
logger.trace("[{}] on node [{}] has allocation id [{}]", shard, nodeShardState.getNode(), nodeShardState.allocationId());
}
} else {
+ final long finalVersion = version;
// when there is an store exception, we disregard the reported version and assign it as no version (same as shard does not exist)
- logger.trace("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", nodeShardState.storeException(), shard, nodeShardState.getNode(), version);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", shard, nodeShardState.getNode(), finalVerison), nodeShardState.storeException());
version = ShardStateMetaData.NO_VERSION;
}
@@ -397,7 +459,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
&& IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings);
}
- protected abstract AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
+ protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
static class NodeShardsResult {
public final List<NodeGatewayStartedShards> orderedAllocationCandidates;
@@ -410,16 +472,28 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}
static class NodesToAllocate {
- final List<NodeGatewayStartedShards> yesNodeShards;
- final List<NodeGatewayStartedShards> throttleNodeShards;
- final List<NodeGatewayStartedShards> noNodeShards;
+ final List<DecidedNode> yesNodeShards;
+ final List<DecidedNode> throttleNodeShards;
+ final List<DecidedNode> noNodeShards;
- public NodesToAllocate(List<NodeGatewayStartedShards> yesNodeShards,
- List<NodeGatewayStartedShards> throttleNodeShards,
- List<NodeGatewayStartedShards> noNodeShards) {
+ public NodesToAllocate(List<DecidedNode> yesNodeShards, List<DecidedNode> throttleNodeShards, List<DecidedNode> noNodeShards) {
this.yesNodeShards = yesNodeShards;
this.throttleNodeShards = throttleNodeShards;
this.noNodeShards = noNodeShards;
}
}
+
+ /**
+ * This class encapsulates the shard state retrieved from a node and the decision that was made
+ * by the allocator for allocating to the node that holds the shard copy.
+ */
+ private static class DecidedNode {
+ final NodeGatewayStartedShards nodeShardState;
+ final Decision decision;
+
+ private DecidedNode(NodeGatewayStartedShards nodeShardState, Decision decision) {
+ this.nodeShardState = nodeShardState;
+ this.decision = decision;
+ }
+ }
}
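
The net effect of this hunk is that PrimaryShardAllocator no longer drives the unassigned iterator itself; makeAllocationDecision() only computes an UnassignedShardDecision and leaves the routing mutation to the shared BaseGatewayShardAllocator. As a rough sketch of the driver loop such a base class could run — the decision accessors (isDecisionTaken(), getFinalDecisionType(), getAssignedNodeId(), getAllocationId(), getAllocationStatus()) are illustrative assumptions, not names confirmed by this commit:

    public void allocateUnassigned(RoutingAllocation allocation) {
        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator =
            allocation.routingNodes().unassigned().iterator();
        while (unassignedIterator.hasNext()) {
            final ShardRouting shard = unassignedIterator.next();
            // computed by the subclass (PrimaryShardAllocator or ReplicaShardAllocator)
            final UnassignedShardDecision decision = makeAllocationDecision(shard, allocation, logger);
            if (decision.isDecisionTaken() == false) {                   // assumed accessor
                continue; // this allocator is not responsible for the shard
            }
            if (decision.getFinalDecisionType() == Decision.Type.YES) {  // assumed accessor
                unassignedIterator.initialize(decision.getAssignedNodeId(), decision.getAllocationId(),
                    ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes());
            } else {
                unassignedIterator.removeAndIgnore(decision.getAllocationStatus(), allocation.changes());
            }
        }
    }

The initialize() and removeAndIgnore() calls are the same ones the removed inline loop used, so the observable routing changes stay the same; only the decision-making moves behind a uniform return type.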
diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
index 75a8a43fab..8f90e072ed 100644
--- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
@@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
@@ -31,24 +31,25 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
-import org.elasticsearch.cluster.routing.RoutingChangesObserver;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
*/
-public abstract class ReplicaShardAllocator extends AbstractComponent {
+public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
public ReplicaShardAllocator(Settings settings) {
super(settings);
@@ -76,8 +77,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
- IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
- if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
+ if (shard.unassignedInfo() != null && shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
continue;
}
@@ -97,7 +97,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
continue;
}
- MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
+ MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores, false);
if (matchingNodes.getNodeWithHighestMatch() != null) {
DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId());
DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch();
@@ -119,7 +119,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
"existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node ["+ nodeWithHighestMatch + "]",
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false, UnassignedInfo.AllocationStatus.NO_ATTEMPT);
// don't cancel shard in the loop as it will cause a ConcurrentModificationException
- shardCancellationActions.add(() -> routingNodes.failShard(logger, shard, unassignedInfo, indexMetaData, allocation.changes()));
+ shardCancellationActions.add(() -> routingNodes.failShard(logger, shard, unassignedInfo, metaData.getIndexSafe(shard.index()), allocation.changes()));
}
}
}
@@ -129,88 +129,88 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
}
- public void allocateUnassigned(RoutingAllocation allocation) {
- final RoutingNodes routingNodes = allocation.routingNodes();
- final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
- MetaData metaData = allocation.metaData();
- while (unassignedIterator.hasNext()) {
- ShardRouting shard = unassignedIterator.next();
- if (shard.primary()) {
- continue;
- }
+ /**
+ * Is the allocator responsible for allocating the given {@link ShardRouting}?
+ */
+ private static boolean isResponsibleFor(final ShardRouting shard) {
+ return shard.primary() == false // must be a replica
+ && shard.unassigned() // must be unassigned
+ // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
+ && shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED;
+ }
- // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
- IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
- if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
- continue;
- }
+ @Override
+ public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
+ final RoutingAllocation allocation,
+ final Logger logger) {
+ if (isResponsibleFor(unassignedShard) == false) {
+ // this allocator is not responsible for deciding on this shard
+ return UnassignedShardDecision.DECISION_NOT_TAKEN;
+ }
- // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
- Decision decision = canBeAllocatedToAtLeastOneNode(shard, allocation);
- if (decision.type() != Decision.Type.YES) {
- logger.trace("{}: ignoring allocation, can't be allocated on any node", shard);
- unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
- continue;
- }
+ final RoutingNodes routingNodes = allocation.routingNodes();
+ final boolean explain = allocation.debugDecision();
+ // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
+ Tuple<Decision, Map<String, Decision>> allocateDecision = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation, explain);
+ if (allocateDecision.v1().type() != Decision.Type.YES) {
+ logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard);
+ return UnassignedShardDecision.noDecision(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1()),
+ "all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard",
+ allocateDecision.v2());
+ }
- AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(shard, allocation);
- if (shardStores.hasData() == false) {
- logger.trace("{}: ignoring allocation, still fetching shard stores", shard);
- allocation.setHasPendingAsyncFetch();
- unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
- continue; // still fetching
- }
+ AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(unassignedShard, allocation);
+ if (shardStores.hasData() == false) {
+ logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard);
+ allocation.setHasPendingAsyncFetch();
+ return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
+ "still fetching shard state from the nodes in the cluster");
+ }
- ShardRouting primaryShard = routingNodes.activePrimary(shard.shardId());
- assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
- TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
- if (primaryStore == null) {
- // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed)
- // we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
- // will try and recover from
- // Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
- logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard);
- continue;
- }
+ ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId());
+ assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
+ if (primaryStore == null) {
+ // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed)
+ // we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
+ // will try and recover from
+ // Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
+ logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", unassignedShard);
+ return UnassignedShardDecision.DECISION_NOT_TAKEN;
+ }
- MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
-
- if (matchingNodes.getNodeWithHighestMatch() != null) {
- RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
- // we only check on THROTTLE since we checked before before on NO
- decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
- if (decision.type() == Decision.Type.THROTTLE) {
- logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
- // we are throttling this, but we have enough to allocate to this node, ignore it for now
- unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
- } else {
- logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
- // we found a match
- unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes());
- }
- } else if (matchingNodes.hasAnyData() == false) {
- // if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed
- ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes());
+ MatchingNodes matchingNodes = findMatchingNodes(unassignedShard, allocation, primaryStore, shardStores, explain);
+ assert explain == false || matchingNodes.nodeDecisions != null : "in explain mode, we must have individual node decisions";
+
+ if (matchingNodes.getNodeWithHighestMatch() != null) {
+ RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
+ // we only check on THROTTLE since we already checked on NO before
+ Decision decision = allocation.deciders().canAllocate(unassignedShard, nodeWithHighestMatch, allocation);
+ if (decision.type() == Decision.Type.THROTTLE) {
+ logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
+ // we are throttling this, as we have enough other shards to allocate to this node, so ignore it for now
+ return UnassignedShardDecision.throttleDecision(
+ "returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one " +
+ "of those copies", matchingNodes.nodeDecisions);
+ } else {
+ logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store",
+ unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
+ // we found a match
+ return UnassignedShardDecision.yesDecision(
+ "allocating to node [" + nodeWithHighestMatch.nodeId() + "] in order to re-use its unallocated persistent store",
+ nodeWithHighestMatch.nodeId(), null, matchingNodes.nodeDecisions);
}
+ } else if (matchingNodes.hasAnyData() == false && unassignedShard.unassignedInfo().isDelayed()) {
+ // if we didn't manage to find *any* data (regardless of matching sizes) and the replica is
+ // unassigned due to a node leaving, we delay allocation of this replica to see if the
+ // node with the shard copy will rejoin, so we can re-use the copy it has
+ logger.debug("{}: allocation of [{}] is delayed", unassignedShard.shardId(), unassignedShard);
+ return UnassignedShardDecision.noDecision(AllocationStatus.DELAYED_ALLOCATION,
+ "not allocating this shard, no nodes contain data for the replica and allocation is delayed");
}
- }
- /**
- * Check if the allocation of the replica is to be delayed. Compute the delay and if it is delayed, add it to the ignore unassigned list
- * Note: we only care about replica in delayed allocation, since if we have an unassigned primary it
- * will anyhow wait to find an existing copy of the shard to be allocated
- * Note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService
- *
- * PUBLIC FOR TESTS!
- *
- * @param unassignedIterator iterator over unassigned shards
- * @param shard the shard which might be delayed
- */
- public void ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard, RoutingChangesObserver changes) {
- if (shard.unassignedInfo().isDelayed()) {
- logger.debug("{}: allocation of [{}] is delayed", shard.shardId(), shard);
- unassignedIterator.removeAndIgnore(AllocationStatus.DELAYED_ALLOCATION, changes);
- }
+ return UnassignedShardDecision.DECISION_NOT_TAKEN;
}
/**
@@ -218,10 +218,15 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
*
* Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one
* node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided
- * YES or THROTTLE.
+ * YES or THROTTLE). If the explain flag is turned on AND the decision is NO or THROTTLE, then this method
+ * also returns a map of nodes to decisions (second value in the tuple) to use for explanations; if the explain
+ * flag is off, the second value in the return tuple will be null.
*/
- private Decision canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
+ private Tuple<Decision, Map<String, Decision>> canBeAllocatedToAtLeastOneNode(ShardRouting shard,
+ RoutingAllocation allocation,
+ boolean explain) {
Decision madeDecision = Decision.NO;
+ Map<String, Decision> nodeDecisions = new HashMap<>();
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().getDataNodes().values()) {
RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
if (node == null) {
@@ -230,13 +235,16 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
// if we can't allocate it on a node, ignore it, for example, this handles
// cases for only allocating a replica after a primary
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+ if (explain) {
+ nodeDecisions.put(node.nodeId(), decision);
+ }
if (decision.type() == Decision.Type.YES) {
- return decision;
+ return Tuple.tuple(decision, null);
} else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) {
madeDecision = decision;
}
}
- return madeDecision;
+ return Tuple.tuple(madeDecision, explain ? nodeDecisions : null);
}
/**
@@ -257,8 +265,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
- AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
+ AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data,
+ boolean explain) {
ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
+ Map<String, Decision> nodeDecisions = new HashMap<>();
for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
DiscoveryNode discoNode = nodeStoreEntry.getKey();
TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
@@ -276,6 +286,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
// we only check for NO, since if this node is THROTTLING and it has enough "same data"
// then we will try and assign it next time
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+ if (explain) {
+ nodeDecisions.put(node.nodeId(), decision);
+ }
+
if (decision.type() == Decision.Type.NO) {
continue;
}
@@ -300,7 +314,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
}
- return new MatchingNodes(nodesToSize);
+ return new MatchingNodes(nodesToSize, explain ? nodeDecisions : null);
}
protected abstract AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation);
@@ -308,9 +322,12 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
static class MatchingNodes {
private final ObjectLongMap<DiscoveryNode> nodesToSize;
private final DiscoveryNode nodeWithHighestMatch;
+ @Nullable
+ private final Map<String, Decision> nodeDecisions;
- public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize) {
+ public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize, @Nullable Map<String, Decision> nodeDecisions) {
this.nodesToSize = nodesToSize;
+ this.nodeDecisions = nodeDecisions;
long highestMatchSize = 0;
DiscoveryNode highestMatchNode = null;
@@ -343,5 +360,13 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
public boolean hasAnyData() {
return nodesToSize.isEmpty() == false;
}
+
+ /**
+ * The decisions map for all nodes with a shard copy, if available.
+ */
+ @Nullable
+ public Map<String, Decision> getNodeDecisions() {
+ return nodeDecisions;
+ }
}
}
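
Both allocators now share an explain-gated idiom: per-node Decision objects are collected into a map only when allocation.debugDecision() is set, and null travels back otherwise so the common path allocates nothing extra. A condensed, self-contained sketch of that idiom — collectNodeDecisions() is a hypothetical helper; the calls inside it are the ones visible in the diff:

    private static Map<String, Decision> collectNodeDecisions(ShardRouting shard, RoutingAllocation allocation, boolean explain) {
        // in explain mode we keep every node's verdict; otherwise we skip the bookkeeping entirely
        final Map<String, Decision> nodeDecisions = explain ? new HashMap<>() : null;
        for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().getDataNodes().values()) {
            final RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
            if (node == null) {
                continue; // this data node currently holds no routing entries
            }
            final Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
            if (explain) {
                nodeDecisions.put(node.nodeId(), decision);
            }
        }
        return nodeDecisions;
    }

The assert in makeAllocationDecision() (explain == false || matchingNodes.nodeDecisions != null) documents the contract: a null decisions map is only legal outside explain mode.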
diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
index fc23ef1358..31fc290c10 100644
--- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
+++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
@@ -19,6 +19,8 @@
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@@ -140,8 +142,14 @@ public class TransportNodesListGatewayStartedShards extends
}
Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
} catch (Exception exception) {
- logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId,
- shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : "");
+ final ShardPath finalShardPath = shardPath;
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "{} can't open index for shard [{}] in path [{}]",
+ shardId,
+ shardStateMetaData,
+ (finalShardPath != null) ? finalShardPath.resolveIndex() : ""),
+ exception);
String allocationId = shardStateMetaData.allocationId != null ?
shardStateMetaData.allocationId.getId() : null;
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.legacyVersion,
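
This hunk shows the logging migration repeated across the rest of the merge: the old ESLogger overloads took the Throwable first with message arguments as trailing varargs, while Log4j 2 takes a lazily evaluated Supplier of a ParameterizedMessage plus the Throwable last. Because the lambda captures locals, and shardPath is reassigned inside the try block, it must first be copied into an effectively final variable, hence finalShardPath. A stand-alone sketch of the idiom, with resolveShardPath() as a hypothetical helper:

    ShardPath shardPath = null;
    try {
        shardPath = resolveShardPath();  // hypothetical helper; may throw
        Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
    } catch (Exception exception) {
        // shardPath is assigned twice above, so it is not effectively final and
        // cannot be captured by the lambda directly
        final ShardPath finalShardPath = shardPath;
        logger.trace(
            (Supplier<?>) () -> new ParameterizedMessage(
                "can't open index in path [{}]",
                (finalShardPath != null) ? finalShardPath.resolveIndex() : ""),
            exception);
    }

The message object is only constructed if TRACE is enabled, which is the point of the Supplier indirection.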
diff --git a/core/src/main/java/org/elasticsearch/http/HttpInfo.java b/core/src/main/java/org/elasticsearch/http/HttpInfo.java
index 0f285974e8..e8f3985a23 100644
--- a/core/src/main/java/org/elasticsearch/http/HttpInfo.java
+++ b/core/src/main/java/org/elasticsearch/http/HttpInfo.java
@@ -21,7 +21,7 @@ package org.elasticsearch.http;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -29,15 +29,20 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-/**
- *
- */
-public class HttpInfo implements Streamable, ToXContent {
+public class HttpInfo implements Writeable, ToXContent {
+
+ private final BoundTransportAddress address;
+ private final long maxContentLength;
- private BoundTransportAddress address;
- private long maxContentLength;
+ public HttpInfo(StreamInput in) throws IOException {
+ address = BoundTransportAddress.readBoundTransportAddress(in);
+ maxContentLength = in.readLong();
+ }
- HttpInfo() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ address.writeTo(out);
+ out.writeLong(maxContentLength);
}
public HttpInfo(BoundTransportAddress address, long maxContentLength) {
@@ -63,24 +68,6 @@ public class HttpInfo implements Streamable, ToXContent {
return builder;
}
- public static HttpInfo readHttpInfo(StreamInput in) throws IOException {
- HttpInfo info = new HttpInfo();
- info.readFrom(in);
- return info;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- address = BoundTransportAddress.readBoundTransportAddress(in);
- maxContentLength = in.readLong();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- address.writeTo(out);
- out.writeLong(maxContentLength);
- }
-
public BoundTransportAddress address() {
return address;
}
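
HttpInfo (and HttpStats below) follow the same Streamable-to-Writeable conversion: the package-private no-arg constructor, mutable readFrom(), and static read*() factory collapse into a StreamInput constructor, which in turn lets the fields become final. Applied to a hypothetical NodePingInfo class, the resulting shape is:

    public class NodePingInfo implements Writeable {  // hypothetical class, for illustration only

        private final long pingCount;  // final becomes possible once deserialization is a constructor

        public NodePingInfo(long pingCount) {
            this.pingCount = pingCount;
        }

        // replaces readFrom(StreamInput) and the static readNodePingInfo(StreamInput) factory
        public NodePingInfo(StreamInput in) throws IOException {
            pingCount = in.readVLong();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVLong(pingCount);
        }

        public long getPingCount() {
            return pingCount;
        }
    }

Symmetry between the StreamInput constructor and writeTo() is the invariant to preserve: fields must be read in exactly the order they are written.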
diff --git a/core/src/main/java/org/elasticsearch/http/HttpServer.java b/core/src/main/java/org/elasticsearch/http/HttpServer.java
index ccf2d764c0..2ada0c23a4 100644
--- a/core/src/main/java/org/elasticsearch/http/HttpServer.java
+++ b/core/src/main/java/org/elasticsearch/http/HttpServer.java
@@ -25,14 +25,12 @@ import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
-import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
@@ -60,7 +58,6 @@ public class HttpServer extends AbstractLifecycleComponent implements HttpServer
private final CircuitBreakerService circuitBreakerService;
- @Inject
public HttpServer(Settings settings, HttpServerTransport transport, RestController restController,
NodeClient client, CircuitBreakerService circuitBreakerService) {
super(settings);
diff --git a/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java b/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java
index 0ec57e2bfc..4dc4a888d8 100644
--- a/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java
+++ b/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java
@@ -24,6 +24,9 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
public interface HttpServerTransport extends LifecycleComponent {
+ String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker";
+ String HTTP_SERVER_BOSS_THREAD_NAME_PREFIX = "http_server_boss";
+
BoundTransportAddress boundAddress();
HttpInfo info();
diff --git a/core/src/main/java/org/elasticsearch/http/HttpStats.java b/core/src/main/java/org/elasticsearch/http/HttpStats.java
index 973821f4a5..d019a65f03 100644
--- a/core/src/main/java/org/elasticsearch/http/HttpStats.java
+++ b/core/src/main/java/org/elasticsearch/http/HttpStats.java
@@ -21,42 +21,23 @@ package org.elasticsearch.http;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-public class HttpStats implements Streamable, ToXContent {
+public class HttpStats implements Writeable, ToXContent {
- private long serverOpen;
- private long totalOpen;
-
- HttpStats() {
-
- }
+ private final long serverOpen;
+ private final long totalOpen;
public HttpStats(long serverOpen, long totalOpen) {
this.serverOpen = serverOpen;
this.totalOpen = totalOpen;
}
- public long getServerOpen() {
- return this.serverOpen;
- }
-
- public long getTotalOpen() {
- return this.totalOpen;
- }
-
- public static HttpStats readHttpStats(StreamInput in) throws IOException {
- HttpStats stats = new HttpStats();
- stats.readFrom(in);
- return stats;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public HttpStats(StreamInput in) throws IOException {
serverOpen = in.readVLong();
totalOpen = in.readVLong();
}
@@ -67,6 +48,14 @@ public class HttpStats implements Streamable, ToXContent {
out.writeVLong(totalOpen);
}
+ public long getServerOpen() {
+ return this.serverOpen;
+ }
+
+ public long getTotalOpen() {
+ return this.totalOpen;
+ }
+
static final class Fields {
static final String HTTP = "http";
static final String CURRENT_OPEN = "current_open";
diff --git a/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
index b155a43610..25acdd06b4 100644
--- a/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
+++ b/core/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
@@ -19,16 +19,13 @@
package org.elasticsearch.index;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
-/**
- *
- */
public abstract class AbstractIndexComponent implements IndexComponent {
- protected final ESLogger logger;
+ protected final Logger logger;
protected final DeprecationLogger deprecationLogger;
protected final IndexSettings indexSettings;
diff --git a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java
index 97e00b98df..3b2cf5cbd0 100644
--- a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java
+++ b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java
@@ -19,9 +19,11 @@
package org.elasticsearch.index;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexEventListener;
@@ -40,7 +42,7 @@ import java.util.List;
final class CompositeIndexEventListener implements IndexEventListener {
private final List<IndexEventListener> listeners;
- private final ESLogger logger;
+ private final Logger logger;
CompositeIndexEventListener(IndexSettings indexSettings, Collection<IndexEventListener> listeners) {
for (IndexEventListener listener : listeners) {
@@ -58,7 +60,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.shardRoutingChanged(indexShard, oldRouting, newRouting);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke shard touring changed callback", e, indexShard.shardId().getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e);
}
}
}
@@ -69,7 +71,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardCreated(indexShard);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke after shard created callback", e, indexShard.shardId().getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e);
throw e;
}
}
@@ -81,7 +83,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardStarted(indexShard);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke after shard started callback", e, indexShard.shardId().getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e);
throw e;
}
}
@@ -94,7 +96,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardClosed(shardId, indexShard, indexSettings);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke before shard closed callback", e, shardId.getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e);
throw e;
}
}
@@ -107,7 +109,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardClosed(shardId, indexShard, indexSettings);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke after shard closed callback", e, shardId.getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e);
throw e;
}
}
@@ -119,7 +121,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.onShardInactive(indexShard);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke on shard inactive callback", e, indexShard.shardId().getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e);
throw e;
}
}
@@ -131,7 +133,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke index shard state changed callback", e, indexShard.shardId().getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e);
throw e;
}
}
@@ -167,7 +169,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardCreated(shardId, indexSettings);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke before shard created callback", e, shardId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e);
throw e;
}
}
@@ -228,7 +230,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardDeleted(shardId, indexSettings);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke before shard deleted callback", e, shardId.getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e);
throw e;
}
}
@@ -241,7 +243,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardDeleted(shardId, indexSettings);
} catch (Exception e) {
- logger.warn("[{}] failed to invoke after shard deleted callback", e, shardId.getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e);
throw e;
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index b9f93bf2ac..58457417fa 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -19,6 +19,8 @@
package org.elasticsearch.index;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -41,7 +43,7 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;
import org.elasticsearch.index.analysis.AnalysisRegistry;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
@@ -92,13 +94,10 @@ import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
-/**
- *
- */
public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
private final IndexEventListener eventListener;
- private final AnalysisService analysisService;
+ private final IndexAnalyzers indexAnalyzers;
private final IndexFieldDataService indexFieldData;
private final BitsetFilterCache bitsetFilterCache;
private final NodeEnvironment nodeEnv;
@@ -138,9 +137,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
List<IndexingOperationListener> indexingOperationListeners) throws IOException {
super(indexSettings);
this.indexSettings = indexSettings;
- this.analysisService = registry.build(indexSettings);
+ this.indexAnalyzers = registry.build(indexSettings);
this.similarityService = similarityService;
- this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry,
+ this.mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry,
IndexService.this::newQueryShardContext);
this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache,
nodeServicesProvider.getCircuitBreakerService(), mapperService);
@@ -215,8 +214,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
return indexFieldData;
}
- public AnalysisService analysisService() {
- return this.analysisService;
+ public IndexAnalyzers getIndexAnalyzers() {
+ return this.indexAnalyzers;
}
public MapperService mapperService() {
@@ -240,7 +239,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
}
}
} finally {
- IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask);
+ IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, indexAnalyzers, refreshTask, fsyncTask);
}
}
}
@@ -397,7 +396,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
final boolean flushEngine = deleted.get() == false && closed.get();
indexShard.close(reason, flushEngine);
} catch (Exception e) {
- logger.debug("[{}] failed to close index shard", e, shardId);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e);
// ignore
}
}
@@ -408,7 +407,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
try {
store.close();
} catch (Exception e) {
- logger.warn("[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] failed to close store on shard removal (reason: [{}])", shardId, reason), e);
}
}
}
@@ -427,7 +428,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
}
} catch (IOException e) {
shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings);
- logger.debug("[{}] failed to delete shard content - scheduled a retry", e, lock.getShardId().id());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] failed to delete shard content - scheduled a retry", lock.getShardId().id()), e);
}
}
}
@@ -639,7 +642,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
try {
shard.onSettingsChanged();
} catch (Exception e) {
- logger.warn("[{}] failed to notify shard about setting change", e, shard.shardId().id());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] failed to notify shard about setting change", shard.shardId().id()), e);
}
}
if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) {
@@ -781,8 +786,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
} catch (Exception ex) {
if (lastThrownException == null || sameException(lastThrownException, ex) == false) {
// prevent log spam: re-logging the same exception on every run (at a 1 sec interval) would flood the logs
- indexService.logger.warn("failed to run task {} - suppressing re-occurring exceptions unless the exception changes",
- ex, toString());
+ indexService.logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to run task {} - suppressing re-occurring exceptions unless the exception changes",
+ toString()),
+ ex);
lastThrownException = ex;
}
} finally {
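
For IndexService callers the AnalysisService removal is a straight accessor swap, and the new IndexAnalyzers object is closed with the other per-index resources in the IOUtils.close(...) call above. A migration sketch; getDefaultIndexAnalyzer() is assumed to be the IndexAnalyzers counterpart of the old defaultIndexAnalyzer():

    // before: indexService.analysisService().defaultIndexAnalyzer()
    NamedAnalyzer analyzer = indexService.getIndexAnalyzers().getDefaultIndexAnalyzer();
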
diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java
index dd0551aa5b..0a21c3b7b5 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java
@@ -18,11 +18,11 @@
*/
package org.elasticsearch.index;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
@@ -130,7 +130,7 @@ public final class IndexSettings {
private final Index index;
private final Version version;
- private final ESLogger logger;
+ private final Logger logger;
private final String nodeName;
private final Settings nodeSettings;
private final int numberOfShards;
@@ -263,8 +263,9 @@ public final class IndexSettings {
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, mergePolicyConfig::setMaxMergedSegment);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, mergePolicyConfig::setSegmentsPerTier);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, mergePolicyConfig::setReclaimDeletesWeight);
- scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, mergeSchedulerConfig::setMaxThreadCount);
- scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, mergeSchedulerConfig::setMaxMergeCount);
+
+ scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
+ mergeSchedulerConfig::setMaxThreadAndMergeCount);
scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.AUTO_THROTTLE_SETTING, mergeSchedulerConfig::setAutoThrottle);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability);
scopedSettings.addSettingsUpdateConsumer(INDEX_TTL_DISABLE_PURGE_SETTING, this::setTTLPurgeDisabled);
diff --git a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java
index ba48adb71a..439acb239a 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java
@@ -19,6 +19,8 @@
package org.elasticsearch.index;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
@@ -143,11 +145,18 @@ public final class IndexWarmer extends AbstractComponent {
}
if (indexShard.warmerService().logger().isTraceEnabled()) {
- indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(),
+ indexShard.warmerService().logger().trace(
+ "warmed global ordinals for [{}], took [{}]",
+ fieldType.name(),
TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Exception e) {
- indexShard.warmerService().logger().warn("failed to warm-up global ordinals for [{}]", e, fieldType.name());
+ indexShard
+ .warmerService()
+ .logger()
+ .warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to warm-up global ordinals for [{}]", fieldType.name()), e);
} finally {
latch.countDown();
}
diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
index a145012dd2..513e87878d 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
@@ -19,9 +19,9 @@
package org.elasticsearch.index;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -34,8 +34,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
-/**
- */
public final class IndexingSlowLog implements IndexingOperationListener {
private final Index index;
private boolean reformat;
@@ -52,7 +50,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private SlowLogLevel level;
- private final ESLogger indexLogger;
+ private final Logger indexLogger;
private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog";
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING =
@@ -112,7 +110,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private void setLevel(SlowLogLevel level) {
this.level = level;
- this.indexLogger.setLevel(level.name());
+ Loggers.setLevel(this.indexLogger, level.name());
}
private void setWarnThreshold(TimeValue warnThreshold) {
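
Log4j 2 Logger instances are views onto the logging configuration and expose no setLevel, so the slow log now routes level changes through Loggers.setLevel instead of the removed ESLogger method. A self-contained sketch of the same effect using plain log4j-core (Loggers.setLevel wraps broadly equivalent configuration plumbing):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.core.config.Configurator;

    class SlowLogLevelSketch {
        private static final Logger indexLogger = LogManager.getLogger("index.indexing.slowlog");

        static void setLevel(String level) {
            // change the level in the configuration; the logger picks it up
            Configurator.setLevel(indexLogger.getName(), Level.valueOf(level));
        }
    }
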
diff --git a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java
index b5856d606e..0f7305789e 100644
--- a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java
@@ -19,11 +19,10 @@
package org.elasticsearch.index;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -117,7 +116,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
public final class MergePolicyConfig {
private final TieredMergePolicy mergePolicy = new TieredMergePolicy();
- private final ESLogger logger;
+ private final Logger logger;
private final boolean mergesEnabled;
public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d;
@@ -155,7 +154,7 @@ public final class MergePolicyConfig {
public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin
- MergePolicyConfig(ESLogger logger, IndexSettings indexSettings) {
+ MergePolicyConfig(Logger logger, IndexSettings indexSettings) {
this.logger = logger;
double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage
ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING);
@@ -172,10 +171,10 @@ public final class MergePolicyConfig {
maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING));
mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
- mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
+ mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac());
mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
- mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
+ mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac());
mergePolicy.setSegmentsPerTier(segmentsPerTier);
mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
if (logger.isTraceEnabled()) {
@@ -193,7 +192,7 @@ public final class MergePolicyConfig {
}
void setMaxMergedSegment(ByteSizeValue maxMergedSegment) {
- mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
+ mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac());
}
void setMaxMergesAtOnceExplicit(Integer maxMergeAtOnceExplicit) {
@@ -205,7 +204,7 @@ public final class MergePolicyConfig {
}
void setFloorSegmentSetting(ByteSizeValue floorSegmentSetting) {
- mergePolicy.setFloorSegmentMB(floorSegementSetting.mbFrac());
+ mergePolicy.setFloorSegmentMB(floorSegmentSetting.getMbFrac());
}
void setExpungeDeletesAllowed(Double value) {
diff --git a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java
index 2eb43a50ee..e04d3dc7a4 100644
--- a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java
@@ -69,13 +69,14 @@ public final class MergeSchedulerConfig {
private volatile int maxMergeCount;
MergeSchedulerConfig(IndexSettings indexSettings) {
- maxThreadCount = indexSettings.getValue(MAX_THREAD_COUNT_SETTING);
- maxMergeCount = indexSettings.getValue(MAX_MERGE_COUNT_SETTING);
+ setMaxThreadAndMergeCount(indexSettings.getValue(MAX_THREAD_COUNT_SETTING),
+ indexSettings.getValue(MAX_MERGE_COUNT_SETTING));
this.autoThrottle = indexSettings.getValue(AUTO_THROTTLE_SETTING);
}
/**
* Returns <code>true</code> iff auto throttle is enabled.
+ *
* @see ConcurrentMergeScheduler#enableAutoIOThrottle()
*/
public boolean isAutoThrottle() {
@@ -100,8 +101,19 @@ public final class MergeSchedulerConfig {
* Expert: directly set the maximum number of merge threads and
* simultaneous merges allowed.
*/
- void setMaxThreadCount(int maxThreadCount) {
+ void setMaxThreadAndMergeCount(int maxThreadCount, int maxMergeCount) {
+ if (maxThreadCount < 1) {
+ throw new IllegalArgumentException("maxThreadCount should be at least 1");
+ }
+ if (maxMergeCount < 1) {
+ throw new IllegalArgumentException("maxMergeCount should be at least 1");
+ }
+ if (maxThreadCount > maxMergeCount) {
+ throw new IllegalArgumentException("maxThreadCount (= " + maxThreadCount +
+ ") should be <= maxMergeCount (= " + maxMergeCount + ")");
+ }
this.maxThreadCount = maxThreadCount;
+ this.maxMergeCount = maxMergeCount;
}
/**
@@ -110,12 +122,4 @@ public final class MergeSchedulerConfig {
public int getMaxMergeCount() {
return maxMergeCount;
}
-
- /**
- *
- * Expert: set the maximum number of simultaneous merges allowed.
- */
- void setMaxMergeCount(int maxMergeCount) {
- this.maxMergeCount = maxMergeCount;
- }
}
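
Collapsing the two independent setters into setMaxThreadAndMergeCount, registered above in IndexSettings as a single two-setting update consumer, means both values always arrive together and the invariant maxThreadCount <= maxMergeCount is checked before either field changes; with separate consumers, an update raising one value and lowering the other could pass or fail depending on delivery order. A minimal sketch of the pair-wise validation, outside the ES settings infrastructure:

    import java.util.function.BiConsumer;

    class PairedSettingSketch {
        static volatile int maxThreadCount = 4;
        static volatile int maxMergeCount = 9;

        // the consumer registered for both settings receives the new pair at once
        static final BiConsumer<Integer, Integer> update = (threads, merges) -> {
            if (threads > merges) {
                throw new IllegalArgumentException("maxThreadCount (= " + threads
                        + ") should be <= maxMergeCount (= " + merges + ")");
            }
            maxThreadCount = threads;
            maxMergeCount = merges;
        };

        public static void main(String[] args) {
            update.accept(8, 16); // valid pair, both fields updated
            try {
                update.accept(8, 4); // violates the invariant
            } catch (IllegalArgumentException e) {
                // rejected as a pair: neither field was changed
            }
        }
    }
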
diff --git a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java
index 148b676331..19086416b8 100644
--- a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java
+++ b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -30,8 +30,6 @@ import org.elasticsearch.search.internal.SearchContext;
import java.util.concurrent.TimeUnit;
-/**
- */
public final class SearchSlowLog implements SearchOperationListener {
private boolean reformat;
@@ -47,8 +45,8 @@ public final class SearchSlowLog implements SearchOperationListener {
private SlowLogLevel level;
- private final ESLogger queryLogger;
- private final ESLogger fetchLogger;
+ private final Logger queryLogger;
+ private final Logger fetchLogger;
private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog";
public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING =
@@ -113,8 +111,8 @@ public final class SearchSlowLog implements SearchOperationListener {
private void setLevel(SlowLogLevel level) {
this.level = level;
- this.queryLogger.setLevel(level.name());
- this.fetchLogger.setLevel(level.name());
+ Loggers.setLevel(queryLogger, level.name());
+ Loggers.setLevel(fetchLogger, level.name());
}
@Override
public void onQueryPhase(SearchContext context, long tookInNanos) {
diff --git a/core/src/main/java/org/elasticsearch/index/VersionType.java b/core/src/main/java/org/elasticsearch/index/VersionType.java
index 3d0448d16a..062fbce10d 100644
--- a/core/src/main/java/org/elasticsearch/index/VersionType.java
+++ b/core/src/main/java/org/elasticsearch/index/VersionType.java
@@ -198,52 +198,6 @@ public enum VersionType implements Writeable {
return version >= 0L || version == Versions.MATCH_ANY;
}
- },
- /**
- * Warning: this version type should be used with care. Concurrent indexing may result in loss of data on replicas
- */
- FORCE((byte) 3) {
- @Override
- public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
- if (currentVersion == Versions.NOT_FOUND) {
- return false;
- }
- if (expectedVersion == Versions.MATCH_ANY) {
- throw new IllegalStateException("you must specify a version when use VersionType.FORCE");
- }
- return false;
- }
-
- @Override
- public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
- throw new AssertionError("VersionType.FORCE should never result in a write conflict");
- }
-
- @Override
- public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
- return false;
- }
-
- @Override
- public String explainConflictForReads(long currentVersion, long expectedVersion) {
- throw new AssertionError("VersionType.FORCE should never result in a read conflict");
- }
-
- @Override
- public long updateVersion(long currentVersion, long expectedVersion) {
- return expectedVersion;
- }
-
- @Override
- public boolean validateVersionForWrites(long version) {
- return version >= 0L;
- }
-
- @Override
- public boolean validateVersionForReads(long version) {
- return version >= 0L || version == Versions.MATCH_ANY;
- }
-
};
private final byte value;
@@ -337,8 +291,6 @@ public enum VersionType implements Writeable {
return EXTERNAL;
} else if ("external_gte".equals(versionType)) {
return EXTERNAL_GTE;
- } else if ("force".equals(versionType)) {
- return FORCE;
}
throw new IllegalArgumentException("No version type match [" + versionType + "]");
}
@@ -357,8 +309,6 @@ public enum VersionType implements Writeable {
return EXTERNAL;
} else if (value == 2) {
return EXTERNAL_GTE;
- } else if (value == 3) {
- return FORCE;
}
throw new IllegalArgumentException("No version type match [" + value + "]");
}
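
With FORCE gone, both lookup paths fall through to the final throw, so any request or stored setting still carrying the old value fails fast instead of being remapped to another type. A small illustration; fromString is assumed to be the method the string-matching branches above belong to:

    import org.elasticsearch.index.VersionType;

    class VersionTypeSketch {
        public static void main(String[] args) {
            try {
                VersionType.fromString("force");
            } catch (IllegalArgumentException e) {
                // "No version type match [force]"
            }
        }
    }
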
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java
index 2ce5e48902..aded2bb4ee 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java
@@ -19,9 +19,8 @@
package org.elasticsearch.index.analysis;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.LegacyNumericTokenStream;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
@@ -53,13 +52,10 @@ import org.apache.lucene.analysis.ro.RomanianAnalyzer;
import org.apache.lucene.analysis.ru.RussianAnalyzer;
import org.apache.lucene.analysis.sv.SwedishAnalyzer;
import org.apache.lucene.analysis.th.ThaiAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tr.TurkishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@@ -70,7 +66,6 @@ import java.io.Reader;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -82,12 +77,9 @@ import java.util.Set;
import static java.util.Collections.unmodifiableMap;
-/**
- *
- */
public class Analysis {
- public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, ESLogger logger) {
+ public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, Logger logger) {
// check for explicit version on the specific analyzer component
String sVersion = settings.get("version");
if (sVersion != null) {
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
index 119e0c16ea..6dddf6eb57 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
@@ -18,14 +18,20 @@
*/
package org.elasticsearch.index.analysis;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
@@ -39,6 +45,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
@@ -46,7 +53,7 @@ import static java.util.Collections.unmodifiableMap;
/**
* An internal registry for tokenizer, token filter, char filter and analyzer.
- * This class exists per node and allows to create per-index {@link AnalysisService} via {@link #build(IndexSettings)}
+ * This class exists per node and allows to create per-index {@link IndexAnalyzers} via {@link #build(IndexSettings)}
*/
public final class AnalysisRegistry implements Closeable {
public static final String INDEX_ANALYSIS_CHAR_FILTER = "index.analysis.char_filter";
@@ -136,17 +143,19 @@ public final class AnalysisRegistry implements Closeable {
}
/**
- * Creates an index-level {@link AnalysisService} from this registry using the given index settings
+ * Creates an index-level {@link IndexAnalyzers} from this registry using the given index settings
*/
- public AnalysisService build(IndexSettings indexSettings) throws IOException {
- final Map<String, Settings> charFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_CHAR_FILTER);
- final Map<String, Settings> tokenFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_FILTER);
- final Map<String, Settings> tokenizersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_TOKENIZER);
- final Map<String, Settings> analyzersSettings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
+ public IndexAnalyzers build(IndexSettings indexSettings) throws IOException {
- final Map<String, CharFilterFactory> charFilterFactories = buildMapping(false, "charfilter", indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
- final Map<String, TokenizerFactory> tokenizerFactories = buildMapping(false, "tokenizer", indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
+ final Map<String, CharFilterFactory> charFilterFactories = buildCharFilterFactories(indexSettings);
+ final Map<String, TokenizerFactory> tokenizerFactories = buildTokenizerFactories(indexSettings);
+ final Map<String, TokenFilterFactory> tokenFilterFactories = buildTokenFilterFactories(indexSettings);
+ final Map<String, AnalyzerProvider<?>> analyzerFactories = buildAnalyzerFactories(indexSettings);
+ return build(indexSettings, analyzerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
+ }
+ public Map<String, TokenFilterFactory> buildTokenFilterFactories(IndexSettings indexSettings) throws IOException {
+ final Map<String, Settings> tokenFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_FILTER);
Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters = new HashMap<>(this.tokenFilters);
/*
* synonym is different than everything else since it needs access to the tokenizer factories for this index.
@@ -154,10 +163,22 @@ public final class AnalysisRegistry implements Closeable {
* hide internal data-structures as much as possible.
*/
tokenFilters.put("synonym", requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
- final Map<String, TokenFilterFactory> tokenFilterFactories = buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
- final Map<String, AnalyzerProvider<?>> analyzierFactories = buildMapping(true, "analyzer", indexSettings, analyzersSettings,
- analyzers, prebuiltAnalysis.analyzerProviderFactories);
- return new AnalysisService(indexSettings, analyzierFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
+ return buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
+ }
+
+ public Map<String, TokenizerFactory> buildTokenizerFactories(IndexSettings indexSettings) throws IOException {
+ final Map<String, Settings> tokenizersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_TOKENIZER);
+ return buildMapping(false, "tokenizer", indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
+ }
+
+ public Map<String, CharFilterFactory> buildCharFilterFactories(IndexSettings indexSettings) throws IOException {
+ final Map<String, Settings> charFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_CHAR_FILTER);
+ return buildMapping(false, "charfilter", indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
+ }
+
+ public Map<String, AnalyzerProvider<?>> buildAnalyzerFactories(IndexSettings indexSettings) throws IOException {
+ final Map<String, Settings> analyzersSettings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
+ return buildMapping(true, "analyzer", indexSettings, analyzersSettings, analyzers, prebuiltAnalysis.analyzerProviderFactories);
}
/**
@@ -174,7 +195,7 @@ public final class AnalysisRegistry implements Closeable {
Settings currentSettings = tokenizerSettings.get(tokenizer);
return getAnalysisProvider("tokenizer", tokenizers, tokenizer, currentSettings.get("type"));
} else {
- return prebuiltAnalysis.tokenizerFactories.get(tokenizer);
+ return getTokenizerProvider(tokenizer);
}
}
@@ -202,7 +223,7 @@ public final class AnalysisRegistry implements Closeable {
return getAnalysisProvider("tokenfilter", tokenFilters, tokenFilter, typeName);
}
} else {
- return prebuiltAnalysis.tokenFilterFactories.get(tokenFilter);
+ return getTokenFilterProvider(tokenFilter);
}
}
@@ -220,7 +241,7 @@ public final class AnalysisRegistry implements Closeable {
Settings currentSettings = tokenFilterSettings.get(charFilter);
return getAnalysisProvider("charfilter", charFilters, charFilter, currentSettings.get("type"));
} else {
- return prebuiltAnalysis.charFilterFactories.get(charFilter);
+ return getCharFilterProvider(charFilter);
}
}
@@ -399,4 +420,132 @@ public final class AnalysisRegistry implements Closeable {
IOUtils.close(analyzerProviderFactories.values().stream().map((a) -> ((PreBuiltAnalyzerProviderFactory)a).analyzer()).collect(Collectors.toList()));
}
}
+
+ public IndexAnalyzers build(IndexSettings indexSettings,
+ Map<String, AnalyzerProvider<?>> analyzerProviders,
+ Map<String, TokenizerFactory> tokenizerFactoryFactories,
+ Map<String, CharFilterFactory> charFilterFactoryFactories,
+ Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
+
+ Index index = indexSettings.getIndex();
+ analyzerProviders = new HashMap<>(analyzerProviders);
+ Logger logger = Loggers.getLogger(getClass(), indexSettings.getSettings());
+ DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
+ Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();
+ Map<String, NamedAnalyzer> analyzers = new HashMap<>();
+ for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
+ processAnalyzerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), analyzerAliases, analyzers,
+ tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
+ }
+ for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {
+ String key = entry.getKey();
+ if (analyzers.containsKey(key) &&
+ ("default".equals(key) || "default_search".equals(key) || "default_search_quoted".equals(key)) == false) {
+ throw new IllegalStateException("already registered analyzer with name: " + key);
+ } else {
+ NamedAnalyzer configured = entry.getValue();
+ analyzers.put(key, configured);
+ }
+ }
+
+ if (!analyzers.containsKey("default")) {
+ processAnalyzerFactory(deprecationLogger, indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS),
+ analyzerAliases, analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
+ }
+ if (!analyzers.containsKey("default_search")) {
+ analyzers.put("default_search", analyzers.get("default"));
+ }
+ if (!analyzers.containsKey("default_search_quoted")) {
+ analyzers.put("default_search_quoted", analyzers.get("default_search"));
+ }
+
+
+ NamedAnalyzer defaultAnalyzer = analyzers.get("default");
+ if (defaultAnalyzer == null) {
+ throw new IllegalArgumentException("no default analyzer configured");
+ }
+ if (analyzers.containsKey("default_index")) {
+ final Version createdVersion = indexSettings.getIndexVersionCreated();
+ if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
+ throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]");
+ } else {
+ deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index.getName());
+ }
+ }
+ NamedAnalyzer defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer;
+ NamedAnalyzer defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer;
+ NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;
+
+ for (Map.Entry<String, NamedAnalyzer> analyzer : analyzers.entrySet()) {
+ if (analyzer.getKey().startsWith("_")) {
+ throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\"");
+ }
+ }
+ return new IndexAnalyzers(indexSettings, defaultIndexAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer,
+ unmodifiableMap(analyzers));
+ }
+
+ private void processAnalyzerFactory(DeprecationLogger deprecationLogger,
+ IndexSettings indexSettings,
+ String name,
+ AnalyzerProvider<?> analyzerFactory,
+ Map<String, NamedAnalyzer> analyzerAliases,
+ Map<String, NamedAnalyzer> analyzers, Map<String, TokenFilterFactory> tokenFilters,
+ Map<String, CharFilterFactory> charFilters, Map<String, TokenizerFactory> tokenizers) {
+ /*
+ * Lucene defaults positionIncrementGap to 0 in all analyzers but
+ * Elasticsearch defaults them to 0 only before version 2.0
+ * and 100 afterwards so we override the positionIncrementGap if it
+ * doesn't match here.
+ */
+ int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
+ if (analyzerFactory instanceof CustomAnalyzerProvider) {
+ ((CustomAnalyzerProvider) analyzerFactory).build(tokenizers, charFilters, tokenFilters);
+ /*
+ * Custom analyzers already default to the correct, version
+ * dependent positionIncrementGap and the user is able to
+ * configure the positionIncrementGap directly on the analyzer so
+ * we disable overriding the positionIncrementGap to preserve the
+ * user's setting.
+ */
+ overridePositionIncrementGap = Integer.MIN_VALUE;
+ }
+ Analyzer analyzerF = analyzerFactory.get();
+ if (analyzerF == null) {
+ throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer");
+ }
+ NamedAnalyzer analyzer;
+ if (analyzerF instanceof NamedAnalyzer) {
+ // if we got a named analyzer back, use it...
+ analyzer = (NamedAnalyzer) analyzerF;
+ if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {
+ // unless the positionIncrementGap needs to be overridden
+ analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
+ }
+ } else {
+ analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
+ }
+ if (analyzers.containsKey(name)) {
+ throw new IllegalStateException("already registered analyzer with name: " + name);
+ }
+ analyzers.put(name, analyzer);
+ // TODO: remove alias support completely when we no longer support pre 5.0 indices
+ final String analyzerAliasKey = "index.analysis.analyzer." + analyzerFactory.name() + ".alias";
+ if (indexSettings.getSettings().get(analyzerAliasKey) != null) {
+ if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_beta1)) {
+ // do not allow alias creation if the index was created on or after v5.0 beta1
+ throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported");
+ }
+
+ // the setting is now removed but we only support it for loading indices created before v5.0
+ deprecationLogger.deprecated("setting [{}] is only allowed on index [{}] because it was created before 5.x; " +
+ "analyzer aliases can no longer be created on new indices.", analyzerAliasKey, indexSettings.getIndex().getName());
+ Set<String> aliases = Sets.newHashSet(indexSettings.getSettings().getAsArray(analyzerAliasKey));
+ for (String alias : aliases) {
+ if (analyzerAliases.putIfAbsent(alias, analyzer) != null) {
+ throw new IllegalStateException("alias [" + alias + "] is already used by [" + analyzerAliases.get(alias).name() + "]");
+ }
+ }
+ }
+ }
}
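
The net effect of the AnalysisRegistry changes: AnalysisService (deleted below) is replaced by the build(IndexSettings) entry point returning an IndexAnalyzers, and the factory maps are now buildable individually for callers that need analysis components without a full index. A hedged usage sketch; registry and indexSettings are assumed in scope, and getDefaultIndexAnalyzer() is assumed to mirror the old defaultIndexAnalyzer():

    // one-shot: build every per-index analyzer
    IndexAnalyzers indexAnalyzers = registry.build(indexSettings);
    NamedAnalyzer defaultAnalyzer = indexAnalyzers.getDefaultIndexAnalyzer();

    // piecemeal: build individual factory maps, e.g. for ad-hoc analysis
    Map<String, TokenizerFactory> tokenizers = registry.buildTokenizerFactories(indexSettings);
    Map<String, TokenFilterFactory> tokenFilters = registry.buildTokenFilterFactories(indexSettings);
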
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java
deleted file mode 100644
index cb84e6c6d0..0000000000
--- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.analysis;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.set.Sets;
-import org.elasticsearch.index.AbstractIndexComponent;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.mapper.TextFieldMapper;
-
-import java.io.Closeable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import static java.util.Collections.unmodifiableMap;
-
-/**
- *
- */
-public class AnalysisService extends AbstractIndexComponent implements Closeable {
-
- private final Map<String, NamedAnalyzer> analyzers;
- private final Map<String, TokenizerFactory> tokenizers;
- private final Map<String, CharFilterFactory> charFilters;
- private final Map<String, TokenFilterFactory> tokenFilters;
-
- private final NamedAnalyzer defaultIndexAnalyzer;
- private final NamedAnalyzer defaultSearchAnalyzer;
- private final NamedAnalyzer defaultSearchQuoteAnalyzer;
-
- public AnalysisService(IndexSettings indexSettings,
- Map<String, AnalyzerProvider<?>> analyzerProviders,
- Map<String, TokenizerFactory> tokenizerFactoryFactories,
- Map<String, CharFilterFactory> charFilterFactoryFactories,
- Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
- super(indexSettings);
- this.tokenizers = unmodifiableMap(tokenizerFactoryFactories);
- this.charFilters = unmodifiableMap(charFilterFactoryFactories);
- this.tokenFilters = unmodifiableMap(tokenFilterFactoryFactories);
- analyzerProviders = new HashMap<>(analyzerProviders);
-
- Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();
- Map<String, NamedAnalyzer> analyzers = new HashMap<>();
- for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
- processAnalyzerFactory(entry.getKey(), entry.getValue(), analyzerAliases, analyzers);
- }
- for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {
- String key = entry.getKey();
- if (analyzers.containsKey(key) &&
- ("default".equals(key) || "default_search".equals(key) || "default_search_quoted".equals(key)) == false) {
- throw new IllegalStateException("already registered analyzer with name: " + key);
- } else {
- NamedAnalyzer configured = entry.getValue();
- analyzers.put(key, configured);
- }
- }
-
- if (!analyzers.containsKey("default")) {
- processAnalyzerFactory("default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS),
- analyzerAliases, analyzers);
- }
- if (!analyzers.containsKey("default_search")) {
- analyzers.put("default_search", analyzers.get("default"));
- }
- if (!analyzers.containsKey("default_search_quoted")) {
- analyzers.put("default_search_quoted", analyzers.get("default_search"));
- }
-
-
- NamedAnalyzer defaultAnalyzer = analyzers.get("default");
- if (defaultAnalyzer == null) {
- throw new IllegalArgumentException("no default analyzer configured");
- }
- if (analyzers.containsKey("default_index")) {
- final Version createdVersion = indexSettings.getIndexVersionCreated();
- if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
- throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index().getName() + "]");
- } else {
- deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index().getName());
- }
- }
- defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer;
- defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer;
- defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;
-
- for (Map.Entry<String, NamedAnalyzer> analyzer : analyzers.entrySet()) {
- if (analyzer.getKey().startsWith("_")) {
- throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\"");
- }
- }
- this.analyzers = unmodifiableMap(analyzers);
- }
-
- private void processAnalyzerFactory(String name, AnalyzerProvider<?> analyzerFactory, Map<String, NamedAnalyzer> analyzerAliases, Map<String, NamedAnalyzer> analyzers) {
- /*
- * Lucene defaults positionIncrementGap to 0 in all analyzers but
- * Elasticsearch defaults them to 0 only before version 2.0
- * and 100 afterwards so we override the positionIncrementGap if it
- * doesn't match here.
- */
- int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
- if (analyzerFactory instanceof CustomAnalyzerProvider) {
- ((CustomAnalyzerProvider) analyzerFactory).build(this);
- /*
- * Custom analyzers already default to the correct, version
- * dependent positionIncrementGap and the user is be able to
- * configure the positionIncrementGap directly on the analyzer so
- * we disable overriding the positionIncrementGap to preserve the
- * user's setting.
- */
- overridePositionIncrementGap = Integer.MIN_VALUE;
- }
- Analyzer analyzerF = analyzerFactory.get();
- if (analyzerF == null) {
- throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer");
- }
- NamedAnalyzer analyzer;
- if (analyzerF instanceof NamedAnalyzer) {
- // if we got a named analyzer back, use it...
- analyzer = (NamedAnalyzer) analyzerF;
- if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {
- // unless the positionIncrementGap needs to be overridden
- analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
- }
- } else {
- analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
- }
- if (analyzers.containsKey(name)) {
- throw new IllegalStateException("already registered analyzer with name: " + name);
- }
- analyzers.put(name, analyzer);
- // TODO: remove alias support completely when we no longer support pre 5.0 indices
- final String analyzerAliasKey = "index.analysis.analyzer." + analyzerFactory.name() + ".alias";
- if (indexSettings.getSettings().get(analyzerAliasKey) != null) {
- if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha6)) {
- // do not allow alias creation if the index was created on or after v5.0 alpha6
- throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported");
- }
-
- // the setting is now removed but we only support it for loading indices created before v5.0
- deprecationLogger.deprecated("setting [{}] is only allowed on index [{}] because it was created before 5.x; " +
- "analyzer aliases can no longer be created on new indices.", analyzerAliasKey, index().getName());
- Set<String> aliases = Sets.newHashSet(indexSettings.getSettings().getAsArray(analyzerAliasKey));
- for (String alias : aliases) {
- if (analyzerAliases.putIfAbsent(alias, analyzer) != null) {
- throw new IllegalStateException("alias [" + alias + "] is already used by [" + analyzerAliases.get(alias).name() + "]");
- }
- }
- }
- }
-
- @Override
- public void close() {
- for (NamedAnalyzer analyzer : analyzers.values()) {
- if (analyzer.scope() == AnalyzerScope.INDEX) {
- try {
- analyzer.close();
- } catch (NullPointerException e) {
- // because analyzers are aliased, they might be closed several times
- // an NPE is thrown in this case, so ignore....
- // TODO: Analyzer's can no longer have aliases in indices created in 5.x and beyond,
- // so we only allow the aliases for analyzers on indices created pre 5.x for backwards
- // compatibility. Once pre 5.0 indices are no longer supported, this check should be removed.
- } catch (Exception e) {
- logger.debug("failed to close analyzer {}", analyzer);
- }
- }
- }
- }
-
- public NamedAnalyzer analyzer(String name) {
- return analyzers.get(name);
- }
-
- public NamedAnalyzer defaultIndexAnalyzer() {
- return defaultIndexAnalyzer;
- }
-
- public NamedAnalyzer defaultSearchAnalyzer() {
- return defaultSearchAnalyzer;
- }
-
- public NamedAnalyzer defaultSearchQuoteAnalyzer() {
- return defaultSearchQuoteAnalyzer;
- }
-
- public TokenizerFactory tokenizer(String name) {
- return tokenizers.get(name);
- }
-
- public CharFilterFactory charFilter(String name) {
- return charFilters.get(name);
- }
-
- public TokenFilterFactory tokenFilter(String name) {
- return tokenFilters.get(name);
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java
index 5a1754a02f..4b185c450d 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arabic
public ArabicAnalyzer get() {
return this.arabicAnalyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java
index fabb8b0738..b58b8a8788 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.hy.ArmenianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arme
public ArmenianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java
index 1ceffd43c8..17f601084b 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.eu.BasqueAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider<Basque
public BasqueAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java
index 9e33dce3a9..872d1fb708 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bra
public BrazilianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java
index f0122874ac..efcc1bfba9 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java
@@ -22,7 +22,7 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.br.BrazilianStemFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -42,4 +42,4 @@ public class BrazilianStemTokenFilterFactory extends AbstractTokenFilterFactory
public TokenStream create(TokenStream tokenStream) {
return new BrazilianStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java
index e93233169b..4eddc84d31 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bul
public BulgarianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java
index 04c068a437..cd03649feb 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ca.CatalanAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<Catal
public CatalanAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java
index 57796c1651..1728a4a3f7 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -44,4 +44,4 @@ public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider<CJKAnalyz
public CJKAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java
index 0225ec6455..da1ca02268 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
import org.apache.lucene.analysis.commongrams.CommonGramsQueryFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java
index 144cbe8174..63861e8084 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java
@@ -26,6 +26,7 @@ import org.elasticsearch.index.mapper.TextFieldMapper;
import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
/**
* A custom analyzer that is built out of a single {@link org.apache.lucene.analysis.Tokenizer} and a list
@@ -43,35 +44,36 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
this.analyzerSettings = settings;
}
- public void build(AnalysisService analysisService) {
+ public void build(final Map<String, TokenizerFactory> tokenizers, final Map<String, CharFilterFactory> charFilters,
+ final Map<String, TokenFilterFactory> tokenFilters) {
String tokenizerName = analyzerSettings.get("tokenizer");
if (tokenizerName == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] must be configured with a tokenizer");
}
- TokenizerFactory tokenizer = analysisService.tokenizer(tokenizerName);
+ TokenizerFactory tokenizer = tokenizers.get(tokenizerName);
if (tokenizer == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find tokenizer under name [" + tokenizerName + "]");
}
- List<CharFilterFactory> charFilters = new ArrayList<>();
+ List<CharFilterFactory> charFiltersList = new ArrayList<>();
String[] charFilterNames = analyzerSettings.getAsArray("char_filter");
for (String charFilterName : charFilterNames) {
- CharFilterFactory charFilter = analysisService.charFilter(charFilterName);
+ CharFilterFactory charFilter = charFilters.get(charFilterName);
if (charFilter == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find char_filter under name [" + charFilterName + "]");
}
- charFilters.add(charFilter);
+ charFiltersList.add(charFilter);
}
- List<TokenFilterFactory> tokenFilters = new ArrayList<>();
+ List<TokenFilterFactory> tokenFilterList = new ArrayList<>();
String[] tokenFilterNames = analyzerSettings.getAsArray("filter");
for (String tokenFilterName : tokenFilterNames) {
- TokenFilterFactory tokenFilter = analysisService.tokenFilter(tokenFilterName);
+ TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName);
if (tokenFilter == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
}
- tokenFilters.add(tokenFilter);
+ tokenFilterList.add(tokenFilter);
}
int positionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
@@ -93,8 +95,8 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
this.customAnalyzer = new CustomAnalyzer(tokenizer,
- charFilters.toArray(new CharFilterFactory[charFilters.size()]),
- tokenFilters.toArray(new TokenFilterFactory[tokenFilters.size()]),
+ charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]),
+ tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()]),
positionIncrementGap,
offsetGap
);
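
CustomAnalyzerProvider.build now takes the three factory maps directly instead of reaching back into an AnalysisService, which is what allows AnalysisRegistry.processAnalyzerFactory above to drive it with the maps it has just built. The call site, as it appears in the registry (maps assumed already constructed):

    if (analyzerFactory instanceof CustomAnalyzerProvider) {
        // each configured tokenizer/char_filter/filter name is resolved in these maps
        ((CustomAnalyzerProvider) analyzerFactory).build(tokenizers, charFilters, tokenFilters);
    }
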
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java
index 6480b13965..f1487d198b 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.cz.CzechAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider<CzechAn
public CzechAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java
index adf1290d25..041ca52c42 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.da.DanishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Danish
public DanishAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java
index cff7a6f208..1c33131624 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider<DutchAn
public DutchAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java
index 078f1e2d29..daa67b00d3 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -45,4 +45,4 @@ public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory {
tokenStream = new SetKeywordMarkerFilter(tokenStream, exclusions);
return new SnowballFilter(tokenStream, new DutchStemmer());
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java
index 2291e199b3..0b94fb301a 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ElisionFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@@ -47,4 +47,4 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implem
public Object getMultiTermComponent() {
return this;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java
index 271934bbd7..bcb7889253 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Engli
public EnglishAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java
index 985a081ccc..0a550f19aa 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java
@@ -20,14 +20,14 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
import org.apache.lucene.analysis.miscellaneous.FingerprintFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.util.CharArraySet;
/** OpenRefine Fingerprinting, which uses a Standard tokenizer and lowercase + stop + fingerprint + asciifolding filters */
public final class FingerprintAnalyzer extends Analyzer {
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java
index bb8a51e096..490b90efb7 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java
@@ -20,7 +20,7 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.CharArraySet;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java
index 676da5f0c0..3a2b329248 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.fi.FinnishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Finni
public FinnishAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java
index 51314633d2..ff848dc681 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider<French
public FrenchAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java
index 3a0ce5d266..e24dc86a22 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -45,4 +45,4 @@ public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory {
tokenStream = new SetKeywordMarkerFilter(tokenStream, exclusions);
return new SnowballFilter(tokenStream, new FrenchStemmer());
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java
index 0be7f76bf0..6f6521f52f 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.gl.GalicianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Gali
public GalicianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java
index 98e5adf852..a55df0f1b8 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.de.GermanAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider<German
public GermanAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java
index ef9c89fed4..72e66c29df 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.de.GermanStemFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class GermanStemTokenFilterFactory extends AbstractTokenFilterFactory {
public TokenStream create(TokenStream tokenStream) {
return new GermanStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java
index 4e628e5e30..85e08764dc 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.hi.HindiAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider<HindiAn
public HindiAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java
index 751ef0094f..f5a09f2ce3 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.hu.HungarianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Hun
public HungarianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java b/core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java
new file mode 100644
index 0000000000..127714178b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.IndexSettings;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * IndexAnalyzers contains a name-to-analyzer mapping for a specific index.
+ * This class only holds analyzers that are explicitly configured for an index and does not allow
+ * access to individual tokenizers, char filters, or token filters.
+ *
+ * @see AnalysisRegistry
+ */
+public final class IndexAnalyzers extends AbstractIndexComponent implements Closeable {
+ private final NamedAnalyzer defaultIndexAnalyzer;
+ private final NamedAnalyzer defaultSearchAnalyzer;
+ private final NamedAnalyzer defaultSearchQuoteAnalyzer;
+ private final Map<String, NamedAnalyzer> analyzers;
+ private final IndexSettings indexSettings;
+
+ public IndexAnalyzers(IndexSettings indexSettings, NamedAnalyzer defaultIndexAnalyzer, NamedAnalyzer defaultSearchAnalyzer,
+ NamedAnalyzer defaultSearchQuoteAnalyzer, Map<String, NamedAnalyzer> analyzers) {
+ super(indexSettings);
+ this.defaultIndexAnalyzer = defaultIndexAnalyzer;
+ this.defaultSearchAnalyzer = defaultSearchAnalyzer;
+ this.defaultSearchQuoteAnalyzer = defaultSearchQuoteAnalyzer;
+ this.analyzers = analyzers;
+ this.indexSettings = indexSettings;
+ }
+
+ /**
+ * Returns an analyzer mapped to the given name or <code>null</code> if not present
+ */
+ public NamedAnalyzer get(String name) {
+ return analyzers.get(name);
+ }
+
+ /**
+ * Returns the default index analyzer for this index
+ */
+ public NamedAnalyzer getDefaultIndexAnalyzer() {
+ return defaultIndexAnalyzer;
+ }
+
+ /**
+ * Returns the default search analyzer for this index
+ */
+ public NamedAnalyzer getDefaultSearchAnalyzer() {
+ return defaultSearchAnalyzer;
+ }
+
+ /**
+ * Returns the default search quote analyzer for this index
+ */
+ public NamedAnalyzer getDefaultSearchQuoteAnalyzer() {
+ return defaultSearchQuoteAnalyzer;
+ }
+
+ @Override
+ public void close() throws IOException {
+ IOUtils.close(() -> analyzers.values().stream()
+ .filter(a -> a.scope() == AnalyzerScope.INDEX)
+ .iterator());
+ }
+
+ /**
+ * Returns the index settings
+ */
+ public IndexSettings getIndexSettings() {
+ return indexSettings;
+ }
+
+}
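A short usage sketch of the new class (the indexAnalyzers instance is assumed to come from the index's analysis setup, and the analyzer name is hypothetical):

    NamedAnalyzer custom = indexAnalyzers.get("my_analyzer"); // null if not configured
    NamedAnalyzer indexDefault = indexAnalyzers.getDefaultIndexAnalyzer();
    NamedAnalyzer searchDefault = indexAnalyzers.getDefaultSearchAnalyzer();
    // Closing releases only INDEX-scoped analyzers; global/prebuilt ones are untouched.
    indexAnalyzers.close();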
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java
index f15cc74a9a..b4c41a3ce3 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.id.IndonesianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider<In
public IndonesianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java
index 813db1d36b..0b27cb73a3 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ga.IrishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class IrishAnalyzerProvider extends AbstractIndexAnalyzerProvider<IrishAn
public IrishAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java
index 9457b45e9f..fd2246a1d6 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.it.ItalianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Itali
public ItalianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java
index ab00657313..334bbab569 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java
@@ -19,9 +19,9 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java
index b787ed6409..0805a3bdf8 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java
@@ -19,9 +19,9 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java
index 01865c17d1..757c6d2f4e 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.lv.LatvianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Latvi
public LatvianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java
index 9b24eaa16a..7d180b3e0f 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.lt.LithuanianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Li
public LithuanianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java
index dcad6960ba..1d9ca2272b 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.el.GreekLowerCaseFilter;
import org.apache.lucene.analysis.ga.IrishLowerCaseFilter;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/MinHashTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/MinHashTokenFilterFactory.java
new file mode 100644
index 0000000000..19213dffe2
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/analysis/MinHashTokenFilterFactory.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.minhash.MinHashFilterFactory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@link TokenFilterFactory} adapter for {@link MinHashFilterFactory}.
+ */
+public class MinHashTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final MinHashFilterFactory minHashFilterFactory;
+
+ public MinHashTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+ super(indexSettings, name, settings);
+ minHashFilterFactory = new MinHashFilterFactory(convertSettings(settings));
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return minHashFilterFactory.create(tokenStream);
+ }
+
+ private Map<String, String> convertSettings(Settings settings) {
+ Map<String, String> settingMap = new HashMap<>();
+ settingMap.put("hashCount", settings.get("hash_count"));
+ settingMap.put("bucketCount", settings.get("bucket_count"));
+ settingMap.put("hashSetSize", settings.get("hash_set_size"));
+ settingMap.put("withRotation", settings.get("with_rotation"));
+ return settingMap;
+ }
+}
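The factory simply renames the snake_case index settings to Lucene's camelCase keys before delegating, so configuring it might look like the sketch below (the setting values are illustrative, and indexSettings, environment, and tokenStream are assumed to exist):

    Settings settings = Settings.builder()
        .put("hash_count", "1")       // becomes hashCount
        .put("bucket_count", "512")   // becomes bucketCount
        .put("hash_set_size", "1")    // becomes hashSetSize
        .put("with_rotation", "true") // becomes withRotation
        .build();
    MinHashTokenFilterFactory factory =
        new MinHashTokenFilterFactory(indexSettings, environment, "min_hash", settings);
    TokenStream minHashed = factory.create(tokenStream);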
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java
index 1b136bfcef..fb0b8e36cf 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.no.NorwegianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Nor
public NorwegianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzer.java
index d5da62f67b..7554f459bf 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzer.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzer.java
@@ -20,12 +20,12 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.pattern.PatternTokenizer;
-import org.apache.lucene.analysis.util.CharArraySet;
import java.util.regex.Pattern;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java
index f00988f4ad..c96d26676a 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java
@@ -20,8 +20,8 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.Version;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java
index 11cbaac4ad..919bdd933f 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.pt.PortugueseAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Po
public PortugueseAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java
index a455cef3ad..9e08c638e5 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ro.RomanianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Roma
public RomanianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java
index fca42325e4..9478b7ff23 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ru.RussianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Russi
public RussianAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianFoldingFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianFoldingFilterFactory.java
index dc6e83860b..c55b487fe7 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianFoldingFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianFoldingFilterFactory.java
@@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
/**
* Factory for {@link ScandinavianFoldingFilter}
*/
-public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory {
+public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {
public ScandinavianFoldingFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
@@ -38,4 +38,8 @@ public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory
return new ScandinavianFoldingFilter(tokenStream);
}
+ @Override
+ public Object getMultiTermComponent() {
+ return this;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianNormalizationFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianNormalizationFilterFactory.java
index 71ac62c68c..72c96dd0c2 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianNormalizationFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/ScandinavianNormalizationFilterFactory.java
@@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
/**
* Factory for {@link ScandinavianNormalizationFilter}
*/
-public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterFactory {
+public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {
public ScandinavianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
@@ -38,4 +38,8 @@ public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterF
return new ScandinavianNormalizationFilter(tokenStream);
}
+ @Override
+ public Object getMultiTermComponent() {
+ return this;
+ }
}
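Both Scandinavian filter factories now advertise themselves as multi-term aware by returning this from getMultiTermComponent(); a sketch of how a consumer can exploit that (the consumer-side check is an illustrative assumption):

    TokenFilterFactory factory = new ScandinavianNormalizationFilterFactory(
        indexSettings, environment, "scandinavian_normalization", Settings.EMPTY);
    if (factory instanceof MultiTermAwareComponent) {
        // Same instance back: the filter is also applied when analyzing
        // multi-term (wildcard/prefix/fuzzy) query text.
        Object multiTerm = ((MultiTermAwareComponent) factory).getMultiTermComponent();
    }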
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java
index a07ae16f8e..1a096b8fa4 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzer.java
@@ -20,16 +20,16 @@ package org.elasticsearch.index.analysis;
*/
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
/** Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java
index b8e092b53d..84f1931633 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java
@@ -18,11 +18,11 @@
*/
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java
index 388f5bcdbf..6b5cf153f3 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ckb.SoraniAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class SoraniAnalyzerProvider extends AbstractIndexAnalyzerProvider<Sorani
public SoraniAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java
index e8afd7b479..ff95272e81 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.es.SpanishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class SpanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Spani
public SpanishAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java
index 00300be489..2af7b5bbab 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java
@@ -19,9 +19,9 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java
index a755e54db1..2203dbdfea 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java
@@ -19,15 +19,15 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.util.CharArraySet;
-import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java
index a3c65b0a17..b9e25346b2 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
index cb1c4b8f5c..aca1fda299 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider<StopAnal
public StopAnalyzer get() {
return this.stopAnalyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
index e724f064bb..4e7c3ae1af 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.core.StopFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java
index a0e81f29cb..bbc14f474b 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.sv.SwedishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class SwedishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Swedi
public SwedishAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java
index 8daff40332..11f1303328 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java
@@ -20,9 +20,9 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.analysis.synonym.SynonymFilter;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java
index 0866bea14f..368dcbe3ab 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.tr.TurkishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class TurkishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Turki
public TurkishAnalyzer get() {
return this.analyzer;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java
index 118d7f84a1..ccc60f4ce7 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java
index b67aebd502..e7147334a7 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java
@@ -19,8 +19,8 @@
package org.elasticsearch.index.analysis.compound;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
index cc3a90c4ab..0e4c54e7a7 100644
--- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
+++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
@@ -19,6 +19,8 @@
package org.elasticsearch.index.cache.bitset;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -258,7 +260,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Exception e) {
- indexShard.warmerService().logger().warn("failed to load bitset for [{}]", e, filterToWarm);
+ indexShard.warmerService().logger().warn((Supplier<?>) () -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e);
} finally {
latch.countDown();
}
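The rewritten warn call follows the Log4j 2 lazy-message idiom used throughout this migration; in isolation the pattern is:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    // The Supplier cast selects the warn(Supplier<?>, Throwable) overload:
    // the message is only formatted if WARN is enabled, and the exception is
    // passed as the Throwable rather than consumed as a format argument.
    logger.warn((Supplier<?>) () -> new ParameterizedMessage(
        "failed to load bitset for [{}]", filterToWarm), e);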
diff --git a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java
index f8d19be3c8..59be64a85d 100644
--- a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java
+++ b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java
@@ -19,13 +19,12 @@
package org.elasticsearch.index.codec;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.lucene54.Lucene54Codec;
-import org.apache.lucene.codecs.lucene60.Lucene60Codec;
+import org.apache.lucene.codecs.lucene62.Lucene62Codec;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.mapper.MapperService;
import java.util.Map;
@@ -45,11 +44,11 @@ public class CodecService {
/** the raw unfiltered lucene default. useful for testing */
public static final String LUCENE_DEFAULT_CODEC = "lucene_default";
- public CodecService(@Nullable MapperService mapperService, ESLogger logger) {
+ public CodecService(@Nullable MapperService mapperService, Logger logger) {
final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder();
if (mapperService == null) {
- codecs.put(DEFAULT_CODEC, new Lucene60Codec());
- codecs.put(BEST_COMPRESSION_CODEC, new Lucene60Codec(Mode.BEST_COMPRESSION));
+ codecs.put(DEFAULT_CODEC, new Lucene62Codec());
+ codecs.put(BEST_COMPRESSION_CODEC, new Lucene62Codec(Mode.BEST_COMPRESSION));
} else {
codecs.put(DEFAULT_CODEC,
new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));
diff --git a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
index e16e66904c..54f15feaa7 100644
--- a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
+++ b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
@@ -19,11 +19,11 @@
package org.elasticsearch.index.codec;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene60.Lucene60Codec;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.lucene.codecs.lucene62.Lucene62Codec;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.mapper.CompletionFieldMapper;
import org.elasticsearch.index.mapper.CompletionFieldMapper2x;
@@ -39,15 +39,15 @@ import org.elasticsearch.index.mapper.MapperService;
* configured for a specific field the default postings format is used.
*/
// LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version
-public class PerFieldMappingPostingFormatCodec extends Lucene60Codec {
- private final ESLogger logger;
+public class PerFieldMappingPostingFormatCodec extends Lucene62Codec {
+ private final Logger logger;
private final MapperService mapperService;
static {
assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) : "PerFieldMappingPostingFormatCodec must subclass the latest lucene codec: " + Lucene.LATEST_CODEC;
}
- public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, ESLogger logger) {
+ public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) {
super(compressionMode);
this.mapperService = mapperService;
this.logger = logger;
diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java
index a2900f649e..baacc4b240 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java
@@ -19,15 +19,18 @@
package org.elasticsearch.index.engine;
-import org.elasticsearch.index.translog.Translog;
+import org.apache.lucene.util.RamUsageEstimator;
/** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. */
class DeleteVersionValue extends VersionValue {
+
+ private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DeleteVersionValue.class);
+
private final long time;
- public DeleteVersionValue(long version, long time, Translog.Location translogLocation) {
- super(version, translogLocation);
+ public DeleteVersionValue(long version, long time) {
+ super(version);
this.time = time;
}
@@ -43,6 +46,6 @@ class DeleteVersionValue extends VersionValue {
@Override
public long ramBytesUsed() {
- return super.ramBytesUsed() + Long.BYTES;
+ return BASE_RAM_BYTES_USED;
}
}
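RamUsageEstimator.shallowSizeOfInstance computes the shallow per-class footprint once (object header plus all declared and inherited fields, aligned), which is why a precomputed constant can replace the old super.ramBytesUsed() + Long.BYTES arithmetic; a standalone illustration with a hypothetical stand-in class:

    import org.apache.lucene.util.RamUsageEstimator;

    class VersionValueLike { // stand-in for VersionValue plus the added time field
        long version;
        long time;
    }
    // Exact value depends on the JVM (typically 32 bytes with compressed oops:
    // 16-byte header + two 8-byte longs).
    long shallowBytes = RamUsageEstimator.shallowSizeOfInstance(VersionValueLike.class);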
diff --git a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java
index 965a2e58f9..466da06dec 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java
@@ -19,12 +19,12 @@
package org.elasticsearch.index.engine;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergeScheduler;
import org.apache.lucene.index.OneMergeHelper;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
@@ -34,9 +34,9 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.merge.OnGoingMerge;
-import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
@@ -50,7 +50,7 @@ import java.util.Set;
*/
class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
- protected final ESLogger logger;
+ protected final Logger logger;
private final Settings indexSettings;
private final ShardId shardId;
diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java
index 12b021ddb7..17822a7133 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -19,6 +19,9 @@
package org.elasticsearch.index.engine;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexCommit;
@@ -39,6 +42,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
+import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
@@ -48,7 +52,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions;
@@ -84,15 +87,12 @@ import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
-/**
- *
- */
public abstract class Engine implements Closeable {
public static final String SYNC_COMMIT_ID = "sync_id";
protected final ShardId shardId;
- protected final ESLogger logger;
+ protected final Logger logger;
protected final EngineConfig engineConfig;
protected final Store store;
protected final AtomicBoolean isClosed = new AtomicBoolean(false);
@@ -102,7 +102,7 @@ public abstract class Engine implements Closeable {
protected final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
protected final ReleasableLock readLock = new ReleasableLock(rwl.readLock());
protected final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock());
- protected volatile Exception failedEngine = null;
+ protected final SetOnce<Exception> failedEngine = new SetOnce<>();
/*
* on <tt>lastWriteNanos</tt> we use System.nanoTime() to initialize this since:
 * - we use the value for figuring out if the shard / engine is active so if we start up and no write has happened yet we still consider it active
@@ -277,7 +277,7 @@ public abstract class Engine implements Closeable {
}
}
- public abstract boolean index(Index operation) throws EngineException;
+ public abstract void index(Index operation) throws EngineException;
public abstract void delete(Delete delete) throws EngineException;
@@ -364,7 +364,7 @@ public abstract class Engine implements Closeable {
throw ex;
} catch (Exception ex) {
ensureOpen(); // throw EngineCloseException here if we are already closed
- logger.error("failed to acquire searcher, source {}", ex, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex);
throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex);
} finally {
if (!success) { // release the ref in the case of an error...
@@ -378,7 +378,7 @@ public abstract class Engine implements Closeable {
protected void ensureOpen() {
if (isClosed.get()) {
- throw new EngineClosedException(shardId, failedEngine);
+ throw new EngineClosedException(shardId, failedEngine.get());
}
}
@@ -443,8 +443,7 @@ public abstract class Engine implements Closeable {
try {
directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ);
} catch (IOException e) {
- logger.warn("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", e,
- segmentReader.directory(), segmentCommitInfo);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e);
return ImmutableOpenMap.of();
}
@@ -459,14 +458,16 @@ public abstract class Engine implements Closeable {
try {
files = directory.listAll();
} catch (IOException e) {
- logger.warn("Couldn't list Compound Reader Directory [{}]", e, directory);
+ final Directory finalDirectory = directory;
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e);
return ImmutableOpenMap.of();
}
} else {
try {
files = segmentReader.getSegmentInfo().files().toArray(new String[]{});
} catch (IOException e) {
- logger.warn("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", e, segmentReader, segmentReader.getSegmentInfo());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e);
return ImmutableOpenMap.of();
}
}
@@ -478,9 +479,14 @@ public abstract class Engine implements Closeable {
try {
length = directory.fileLength(file);
} catch (NoSuchFileException | FileNotFoundException e) {
- logger.warn("Tried to query fileLength but file is gone [{}] [{}]", e, directory, file);
+ final Directory finalDirectory = directory;
+ logger.warn((Supplier<?>)
+ () -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e);
} catch (IOException e) {
- logger.warn("Error when trying to query fileLength [{}] [{}]", e, directory, file);
+ final Directory finalDirectory = directory;
+ logger.warn(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e);
}
if (length == 0L) {
continue;
@@ -492,7 +498,10 @@ public abstract class Engine implements Closeable {
try {
directory.close();
} catch (IOException e) {
- logger.warn("Error when closing compound reader on Directory [{}]", e, directory);
+ final Directory finalDirectory = directory;
+ logger.warn(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e);
}
}
@@ -527,7 +536,7 @@ public abstract class Engine implements Closeable {
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
- logger.trace("failed to get size for [{}]", e, info.info.name);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
final SegmentReader segmentReader = segmentReader(reader.reader());
segment.memoryInBytes = segmentReader.ramBytesUsed();
@@ -557,7 +566,7 @@ public abstract class Engine implements Closeable {
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
- logger.trace("failed to get size for [{}]", e, info.info.name);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
segments.put(info.info.name, segment);
} else {
@@ -591,7 +600,7 @@ public abstract class Engine implements Closeable {
the store is closed so we need to make sure we increment it here
*/
try {
- return !getSearcherManager().isSearcherCurrent();
+ return getSearcherManager().isSearcherCurrent() == false;
} catch (IOException e) {
logger.error("failed to access searcher manager", e);
failEngine("failed to access searcher manager", e);
@@ -664,17 +673,19 @@ public abstract class Engine implements Closeable {
if (failEngineLock.tryLock()) {
store.incRef();
try {
+ if (failedEngine.get() != null) {
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure);
+ return;
+ }
+ // this must happen before we close IW or Translog such that we can check this state to opt out of failing the engine
+ // again on any caught AlreadyClosedException
+ failedEngine.set((failure != null) ? failure : new IllegalStateException(reason));
try {
// we just go and close this engine - no way to recover
closeNoLock("engine failed on: [" + reason + "]");
} finally {
- if (failedEngine != null) {
- logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", failure, reason);
- return;
- }
- logger.warn("failed engine [{}]", failure, reason);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed engine [{}]", reason), failure);
// we must set a failure exception, generate one if not supplied
- failedEngine = (failure != null) ? failure : new IllegalStateException(reason);
// we first mark the store as corrupted before we notify any listeners
// this must happen first otherwise we might try to reallocate so quickly
// on the same node that we don't see the corrupted marker file when
@@ -696,7 +707,7 @@ public abstract class Engine implements Closeable {
store.decRef();
}
} else {
- logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", failure, reason);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure);
}
}
@@ -847,18 +858,24 @@ public abstract class Engine implements Closeable {
public static class Index extends Operation {
private final ParsedDocument doc;
+ private final long autoGeneratedIdTimestamp;
+ private final boolean isRetry;
+ private boolean created;
- public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
+ public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime,
+ long autoGeneratedIdTimestamp, boolean isRetry) {
super(uid, version, versionType, origin, startTime);
this.doc = doc;
+ this.isRetry = isRetry;
+ this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
}
public Index(Term uid, ParsedDocument doc) {
this(uid, doc, Versions.MATCH_ANY);
- }
+ } // TEST ONLY
- public Index(Term uid, ParsedDocument doc, long version) {
- this(uid, doc, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
+ Index(Term uid, ParsedDocument doc, long version) {
+ this(uid, doc, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), -1, false);
}
public ParsedDocument parsedDoc() {
@@ -905,11 +922,36 @@ public abstract class Engine implements Closeable {
return this.doc.source();
}
+ public boolean isCreated() {
+ return created;
+ }
+
+ public void setCreated(boolean created) {
+ this.created = created;
+ }
+
@Override
protected int estimatedSizeInBytes() {
return (id().length() + type().length()) * 2 + source().length() + 12;
}
+ /**
+ * Returns a positive timestamp if the ID of this document is auto-generated by elasticsearch.
+ * If this property is non-negative, indexing code might optimize the addition of this document
+ * due to its append-only nature.
+ */
+ public long getAutoGeneratedIdTimestamp() {
+ return autoGeneratedIdTimestamp;
+ }
+
+ /**
+ * Returns <code>true</code> if this index request has been retried on the coordinating node and can therefore be delivered
+ * multiple times. Note: this might also be set to <code>true</code> if an equivalent event occurred, like the replay of the transaction log.
+ */
+ public boolean isRetry() {
+ return isRetry;
+ }
+
}
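With the widened constructor and the created flag moved onto the operation itself, a primary-shard write with an auto-generated ID could be assembled roughly as follows. This is a hedged sketch: `uid`, `doc`, and `engine` are placeholders, and the timestamp source is an assumption, not taken from the patch.

// Hypothetical call site. Versions.MATCH_ANY with VersionType.INTERNAL is the
// combination canOptimizeAddDocument() expects for Origin.PRIMARY.
long autoGeneratedIdTimestamp = System.currentTimeMillis(); // stamped before indexing / replication
boolean isRetry = false; // true only when the coordinating node resends the request
Engine.Index op = new Engine.Index(uid, doc, Versions.MATCH_ANY, VersionType.INTERNAL,
        Engine.Operation.Origin.PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
engine.index(op); // void now -- the result is carried by the operation itself
boolean created = op.isCreated(); // replaces the old boolean return value of index()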
public static class Delete extends Operation {
@@ -1000,32 +1042,23 @@ public abstract class Engine implements Closeable {
public static class GetResult implements Releasable {
private final boolean exists;
private final long version;
- private final Translog.Source source;
private final Versions.DocIdAndVersion docIdAndVersion;
private final Searcher searcher;
- public static final GetResult NOT_EXISTS = new GetResult(false, Versions.NOT_FOUND, null);
+ public static final GetResult NOT_EXISTS = new GetResult(false, Versions.NOT_FOUND, null, null);
- /**
- * Build a realtime get result from the translog.
- */
- public GetResult(boolean exists, long version, @Nullable Translog.Source source) {
- this.source = source;
+ private GetResult(boolean exists, long version, Versions.DocIdAndVersion docIdAndVersion, Searcher searcher) {
this.exists = exists;
this.version = version;
- this.docIdAndVersion = null;
- this.searcher = null;
+ this.docIdAndVersion = docIdAndVersion;
+ this.searcher = searcher;
}
/**
* Build a non-realtime get result from the searcher.
*/
public GetResult(Searcher searcher, Versions.DocIdAndVersion docIdAndVersion) {
- this.exists = true;
- this.source = null;
- this.version = docIdAndVersion.version;
- this.docIdAndVersion = docIdAndVersion;
- this.searcher = searcher;
+ this(true, docIdAndVersion.version, docIdAndVersion, searcher);
}
public boolean exists() {
@@ -1036,11 +1069,6 @@ public abstract class Engine implements Closeable {
return this.version;
}
- @Nullable
- public Translog.Source source() {
- return source;
- }
-
public Searcher searcher() {
return this.searcher;
}
@@ -1055,9 +1083,7 @@ public abstract class Engine implements Closeable {
}
public void release() {
- if (searcher != null) {
- searcher.close();
- }
+ Releasables.close(searcher);
}
}
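The one-line release() relies on Releasables.close being tolerant of null arguments, which is what makes the explicit null check above removable. The contract it depends on is roughly this (a sketch, not the actual utility):

// Sketch of the null-tolerant close contract that release() relies on.
static void closeAll(AutoCloseable... resources) {
    for (AutoCloseable resource : resources) {
        if (resource != null) { // nulls are simply skipped
            try {
                resource.close();
            } catch (Exception e) {
                // the real utility has its own wrapping policy; this is illustrative
                throw new RuntimeException(e);
            }
        }
    }
}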
@@ -1081,8 +1107,6 @@ public abstract class Engine implements Closeable {
logger.debug("flushing shard on close - this might take some time to sync files to disk");
try {
flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running.
- } catch (FlushNotAllowedEngineException ex) {
- logger.debug("flush not allowed during flushAndClose - skipping");
} catch (EngineClosedException ex) {
logger.debug("engine already closed - skipping flushAndClose");
}
@@ -1209,4 +1233,11 @@ public abstract class Engine implements Closeable {
* This operation will close the engine if the recovery fails.
*/
public abstract Engine recoverFromTranslog() throws IOException;
+
+ /**
+ * Returns <code>true</code> iff this engine is currently recovering from translog.
+ */
+ public boolean isRecovering() {
+ return false;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
index 13408408e7..e598eecc07 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
@@ -24,7 +24,9 @@ import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.search.similarities.Similarity;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -33,7 +35,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.codec.CodecService;
-import org.elasticsearch.index.shard.RefreshListeners;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.store.Store;
@@ -65,8 +66,9 @@ public final class EngineConfig {
private final Engine.EventListener eventListener;
private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy;
+ private final long maxUnsafeAutoIdTimestamp;
@Nullable
- private final RefreshListeners refreshListeners;
+ private final ReferenceManager.RefreshListener refreshListeners;
/**
* Index setting to change the low level lucene codec used for writing new segments.
@@ -89,7 +91,17 @@ public final class EngineConfig {
}
}, Property.IndexScope, Property.NodeScope);
- private TranslogConfig translogConfig;
+ /**
+ * Configures an index to optimize documents with auto-generated IDs for append-only workloads. An update of this setting from
+ * <code>false</code> to <code>true</code> might not take effect immediately. In other words, disabling the optimization is applied
+ * immediately, while re-enabling it might not be applied until the engine is in a safe state to do so. Depending on the engine
+ * implementation, the re-enabled optimization won't be reflected until the engine is restarted or the index is closed and reopened.
+ * The default is <code>true</code>.
+ */
+ public static final Setting<Boolean> INDEX_OPTIMIZE_AUTO_GENERATED_IDS = Setting.boolSetting("index.optimize_auto_generated_id", true,
+ Property.IndexScope, Property.Dynamic);
+
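Since the setting is registered with Property.Dynamic, it can in principle be toggled on a live index. A sketch of what that might look like through the Java client; the `client` variable and the index name are assumptions:

// Hypothetical: opt an index out of the append-only optimization at runtime.
// Disabling applies immediately; re-enabling may wait for a safe state,
// as the javadoc above explains.
client.admin().indices().prepareUpdateSettings("my_index")
        .setSettings(Settings.builder()
                .put(EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey(), false)
                .build())
        .get();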
+ private final TranslogConfig translogConfig;
private final OpenMode openMode;
/**
@@ -97,10 +109,11 @@ public final class EngineConfig {
*/
public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool,
IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
- MergePolicy mergePolicy,Analyzer analyzer,
+ MergePolicy mergePolicy, Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy,
- TranslogConfig translogConfig, TimeValue flushMergesAfter, RefreshListeners refreshListeners) {
+ TranslogConfig translogConfig, TimeValue flushMergesAfter, ReferenceManager.RefreshListener refreshListeners,
+ long maxUnsafeAutoIdTimestamp) {
if (openMode == null) {
throw new IllegalArgumentException("openMode must not be null");
}
@@ -127,6 +140,9 @@ public final class EngineConfig {
this.flushMergesAfter = flushMergesAfter;
this.openMode = openMode;
this.refreshListeners = refreshListeners;
+ assert maxUnsafeAutoIdTimestamp >= IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP :
+ "maxUnsafeAutoIdTimestamp must be >= -1 but was " + maxUnsafeAutoIdTimestamp;
+ this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp;
}
/**
@@ -306,9 +322,17 @@ public final class EngineConfig {
}
/**
- * {@linkplain RefreshListeners} instance to configure.
+ * {@linkplain ReferenceManager.RefreshListener} instance to configure.
*/
- public RefreshListeners getRefreshListeners() {
+ public ReferenceManager.RefreshListener getRefreshListeners() {
return refreshListeners;
}
+
+ /**
+ * Returns the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine.
+ * This is used to ensure we don't add duplicate documents when we assume an append-only case based on auto-generated IDs.
+ */
+ public long getMaxUnsafeAutoIdTimestamp() {
+ return indexSettings.getValue(INDEX_OPTIMIZE_AUTO_GENERATED_IDS) ? maxUnsafeAutoIdTimestamp : Long.MAX_VALUE;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java b/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java
index ac95799b3b..a53ac1dd41 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.engine;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.store.AlreadyClosedException;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.store.Store;
import java.io.IOException;
@@ -35,9 +35,9 @@ public class EngineSearcher extends Engine.Searcher {
private final SearcherManager manager;
private final AtomicBoolean released = new AtomicBoolean(false);
private final Store store;
- private final ESLogger logger;
+ private final Logger logger;
- public EngineSearcher(String source, IndexSearcher searcher, SearcherManager manager, Store store, ESLogger logger) {
+ public EngineSearcher(String source, IndexSearcher searcher, SearcherManager manager, Store store, Logger logger) {
super(source, searcher);
this.manager = manager;
this.store = store;
@@ -59,9 +59,8 @@ public class EngineSearcher extends Engine.Searcher {
} catch (IOException e) {
throw new IllegalStateException("Cannot close", e);
} catch (AlreadyClosedException e) {
- /* this one can happen if we already closed the
- * underlying store / directory and we call into the
- * IndexWriter to free up pending files. */
+ // This means there's a bug somewhere: don't suppress it
+ throw new AssertionError(e);
} finally {
store.decRef();
}
diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index eba6fa1080..6336318d1e 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.engine;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFormatTooOldException;
@@ -41,18 +42,21 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InfoStream;
import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.LoggerInfoStream;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.KeyedLock;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.merge.OnGoingMerge;
@@ -73,13 +77,11 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
-/**
- *
- */
public class InternalEngine extends Engine {
/**
* When we last pruned expired tombstones from versionMap.deletes:
@@ -114,11 +116,20 @@ public class InternalEngine extends Engine {
// incoming indexing ops to a single thread:
private final AtomicInteger throttleRequestCount = new AtomicInteger();
private final EngineConfig.OpenMode openMode;
- private final AtomicBoolean allowCommits = new AtomicBoolean(true);
+ private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false);
+ private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);
+ private final CounterMetric numVersionLookups = new CounterMetric();
+ private final CounterMetric numIndexVersionsLookups = new CounterMetric();
public InternalEngine(EngineConfig engineConfig) throws EngineException {
super(engineConfig);
openMode = engineConfig.getOpenMode();
+ if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_beta1)) {
+ // no optimization for pre 5.0.0-beta1 indices since the translog might not have all the information needed
+ maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE);
+ } else {
+ maxUnsafeAutoIdTimestamp.set(engineConfig.getMaxUnsafeAutoIdTimestamp());
+ }
this.versionMap = new LiveVersionMap();
store.incRef();
IndexWriter writer = null;
@@ -152,11 +163,11 @@ public class InternalEngine extends Engine {
manager = createSearcherManager();
this.searcherManager = manager;
this.versionMap.setManager(searcherManager);
+ assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it";
// don't allow commits until we are done with recovering
- allowCommits.compareAndSet(true, openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG);
+ pendingTranslogRecovery.set(openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG);
if (engineConfig.getRefreshListeners() != null) {
searcherManager.addListener(engineConfig.getRefreshListeners());
- engineConfig.getRefreshListeners().setTranslog(translog);
}
success = true;
} finally {
@@ -180,14 +191,14 @@ public class InternalEngine extends Engine {
if (openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
throw new IllegalStateException("Can't recover from translog with open mode: " + openMode);
}
- if (allowCommits.get()) {
+ if (pendingTranslogRecovery.get() == false) {
throw new IllegalStateException("Engine has already been recovered");
}
try {
recoverFromTranslog(engineConfig.getTranslogRecoveryPerformer());
} catch (Exception e) {
try {
- allowCommits.set(false); // just play safe and never allow commits on this
+ pendingTranslogRecovery.set(true); // just play it safe and never allow commits on this; see #ensureCanFlush
failEngine("failed to recover from translog", e);
} catch (Exception inner) {
e.addSuppressed(inner);
@@ -211,8 +222,8 @@ public class InternalEngine extends Engine {
}
// flush if we recovered something or if we have references to older translogs
// note: if opsRecovered == 0 and we have older translogs it means they are corrupted or 0 length.
- assert allowCommits.get() == false : "commits are allowed but shouldn't";
- allowCommits.set(true); // we are good - now we can commit
+ assert pendingTranslogRecovery.get(): "translogRecovery is not pending but should be";
+ pendingTranslogRecovery.set(false); // we are good - now we can commit
if (opsRecovered > 0) {
logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]",
opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration());
@@ -328,10 +339,7 @@ public class InternalEngine extends Engine {
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(),
get.versionType().explainConflictForReads(versionValue.version(), get.version()));
}
- Translog.Operation op = translog.read(versionValue.translogLocation());
- if (op != null) {
- return new GetResult(true, versionValue.version(), op.getSource());
- }
+ refresh("realtime_get");
}
}
@@ -368,11 +376,11 @@ public class InternalEngine extends Engine {
return currentVersion;
}
- private static VersionValueSupplier NEW_VERSION_VALUE = (u, t, l) -> new VersionValue(u, l);
+ private static VersionValueSupplier NEW_VERSION_VALUE = (u, t) -> new VersionValue(u);
@FunctionalInterface
private interface VersionValueSupplier {
- VersionValue apply(long updatedVersion, long time, Translog.Location location);
+ VersionValue apply(long updatedVersion, long time);
}
private <T extends Engine.Operation> void maybeAddToTranslog(
@@ -383,27 +391,21 @@ public class InternalEngine extends Engine {
if (op.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
final Translog.Location translogLocation = translog.add(toTranslogOp.apply(op));
op.setTranslogLocation(translogLocation);
- versionMap.putUnderLock(op.uid().bytes(), toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), op.getTranslogLocation()));
- } else {
- // we do not replay in to the translog, so there is no
- // translog location; that is okay because real-time
- // gets are not possible during recovery and we will
- // flush when the recovery is complete
- versionMap.putUnderLock(op.uid().bytes(), toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), null));
}
+ versionMap.putUnderLock(op.uid().bytes(), toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
+
}
@Override
- public boolean index(Index index) {
- final boolean created;
+ public void index(Index index) {
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
if (index.origin().isRecovery()) {
// Don't throttle recovery operations
- created = innerIndex(index);
+ innerIndex(index);
} else {
try (Releasable r = throttle.acquireThrottle()) {
- created = innerIndex(index);
+ innerIndex(index);
}
}
} catch (IllegalStateException | IOException e) {
@@ -414,33 +416,109 @@ public class InternalEngine extends Engine {
}
throw new IndexFailedEngineException(shardId, index.type(), index.id(), e);
}
- return created;
}
- private boolean innerIndex(Index index) throws IOException {
+ private boolean canOptimizeAddDocument(Index index) {
+ if (index.getAutoGeneratedIdTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) {
+ assert index.getAutoGeneratedIdTimestamp() >= 0 : "autoGeneratedIdTimestamp must be non-negative but was: "
+ + index.getAutoGeneratedIdTimestamp();
+ switch (index.origin()) {
+ case PRIMARY:
+ assert (index.version() == Versions.MATCH_ANY && index.versionType() == VersionType.INTERNAL)
+ : "version: " + index.version() + " type: " + index.versionType();
+ return true;
+ case PEER_RECOVERY:
+ case REPLICA:
+ assert index.version() == 1 && index.versionType() == VersionType.EXTERNAL
+ : "version: " + index.version() + " type: " + index.versionType();
+ return true;
+ case LOCAL_TRANSLOG_RECOVERY:
+ assert index.isRetry();
+ return false; // even if retry is set we never optimize local recovery
+ default:
+ throw new IllegalArgumentException("unknown origin " + index.origin());
+ }
+ }
+ return false;
+ }
+
+ private void innerIndex(Index index) throws IOException {
try (Releasable ignored = acquireLock(index.uid())) {
lastWriteNanos = index.startTime();
- final long currentVersion;
+ /* if we have an autoGeneratedID that comes into the engine we can potentially optimize
+ * and just use addDocument instead of updateDocument and skip the entire version and index lookup across the board.
+ * Yet, we have to deal with multiple document delivery; for this we use a property of the document that is added
+ * to detect if it has potentially been added before. We use the document's timestamp for this since it's something
+ * that:
+ * - doesn't change per document
+ * - is preserved in the transaction log
+ * - and is assigned before we start to index / replicate
+ * NOTE: it's not important for this timestamp to be consistent across nodes etc. It's just a number that is in the common
+ * case increasing, and it can be used in the failure case, when we retry and resend documents, to establish a happens-before relationship.
+ * for instance:
+ * - doc A has autoGeneratedIdTimestamp = 10, isRetry = false
+ * - doc B has autoGeneratedIdTimestamp = 9, isRetry = false
+ *
+ * while both docs are in flight, we disconnect on one node, reconnect and send doc A again
+ * - now doc A' has autoGeneratedIdTimestamp = 10, isRetry = true
+ *
+ * if A' arrives on the shard first we update maxUnsafeAutoIdTimestamp to 10 and use updateDocument. All subsequent
+ * documents that arrive (A and B) will also use updateDocument since their timestamps are at most maxUnsafeAutoIdTimestamp.
+ * While this is not strictly needed for doc B, it is much simpler to implement since it will just de-optimize some docs in the worst case.
+ *
+ * if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A' will then either be skipped
+ * or will call updateDocument.
+ */
+ long currentVersion;
final boolean deleted;
- final VersionValue versionValue = versionMap.getUnderLock(index.uid());
- if (versionValue == null) {
- currentVersion = loadCurrentVersionFromIndex(index.uid());
- deleted = currentVersion == Versions.NOT_FOUND;
+ // if anything is fishy here, i.e. there is a retry, we force updateDocument below so we update the document in the
+ // lucene index without checking the version map, but we still do the version check
+ final boolean forceUpdateDocument;
+ if (canOptimizeAddDocument(index)) {
+ long deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get();
+ if (index.isRetry()) {
+ forceUpdateDocument = true;
+ do {
+ deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get();
+ if (deOptimizeTimestamp >= index.getAutoGeneratedIdTimestamp()) {
+ break;
+ }
+ } while(maxUnsafeAutoIdTimestamp.compareAndSet(deOptimizeTimestamp,
+ index.getAutoGeneratedIdTimestamp()) == false);
+ assert maxUnsafeAutoIdTimestamp.get() >= index.getAutoGeneratedIdTimestamp();
+ } else {
+ // in this case we only force the update if the de-optimize timestamp already covers this document's timestamp
+ forceUpdateDocument = deOptimizeTimestamp >= index.getAutoGeneratedIdTimestamp();
+ }
+ currentVersion = Versions.NOT_FOUND;
+ deleted = true;
} else {
- currentVersion = checkDeletedAndGCed(versionValue);
- deleted = versionValue.delete();
+ // update the document
+ forceUpdateDocument = false; // we don't force it - it depends on the version
+ final VersionValue versionValue = versionMap.getUnderLock(index.uid());
+ assert incrementVersionLookup();
+ if (versionValue == null) {
+ currentVersion = loadCurrentVersionFromIndex(index.uid());
+ deleted = currentVersion == Versions.NOT_FOUND;
+ } else {
+ currentVersion = checkDeletedAndGCed(versionValue);
+ deleted = versionValue.delete();
+ }
}
-
final long expectedVersion = index.version();
- if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) return false;
-
+ if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) {
+ index.setCreated(false);
+ return;
+ }
final long updatedVersion = updateVersion(index, currentVersion, expectedVersion);
-
- final boolean created = indexOrUpdate(index, currentVersion, versionValue);
-
+ index.setCreated(deleted);
+ if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) {
+ // document does not exist, we can optimize for create
+ index(index, indexWriter);
+ } else {
+ update(index, indexWriter);
+ }
maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE);
-
- return created;
}
}
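The retry branch above is a lock-free monotonic-maximum update: it raises maxUnsafeAutoIdTimestamp to the retried document's timestamp but never lowers it, even under concurrent writers. Reduced to its essentials (a sketch, not engine code):

import java.util.concurrent.atomic.AtomicLong;

// Raise `max` to at least `candidate` without ever lowering it. The CAS is
// retried only while our candidate is still larger than the published value.
static void raiseToAtLeast(AtomicLong max, long candidate) {
    long current;
    do {
        current = max.get();
        if (current >= candidate) {
            break; // a concurrent writer already published an equal or larger value
        }
    } while (max.compareAndSet(current, candidate) == false);
}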
@@ -450,18 +528,6 @@ public class InternalEngine extends Engine {
return updatedVersion;
}
- private boolean indexOrUpdate(final Index index, final long currentVersion, final VersionValue versionValue) throws IOException {
- final boolean created;
- if (currentVersion == Versions.NOT_FOUND) {
- // document does not exists, we can optimize for create
- created = true;
- index(index, indexWriter);
- } else {
- created = update(index, versionValue, indexWriter);
- }
- return created;
- }
-
private static void index(final Index index, final IndexWriter indexWriter) throws IOException {
if (index.docs().size() > 1) {
indexWriter.addDocuments(index.docs());
@@ -470,19 +536,12 @@ public class InternalEngine extends Engine {
}
}
- private static boolean update(final Index index, final VersionValue versionValue, final IndexWriter indexWriter) throws IOException {
- final boolean created;
- if (versionValue != null) {
- created = versionValue.delete(); // we have a delete which is not GC'ed...
- } else {
- created = false;
- }
+ private static void update(final Index index, final IndexWriter indexWriter) throws IOException {
if (index.docs().size() > 1) {
indexWriter.updateDocuments(index.uid(), index.docs());
} else {
indexWriter.updateDocument(index.uid(), index.docs().get(0));
}
- return created;
}
@Override
@@ -517,6 +576,7 @@ public class InternalEngine extends Engine {
final long currentVersion;
final boolean deleted;
final VersionValue versionValue = versionMap.getUnderLock(delete.uid());
+ assert incrementVersionLookup();
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(delete.uid());
deleted = currentVersion == Versions.NOT_FOUND;
@@ -562,8 +622,8 @@ public class InternalEngine extends Engine {
ensureOpen();
searcherManager.maybeRefreshBlocking();
} catch (AlreadyClosedException e) {
- ensureOpen();
- maybeFailEngine("refresh", e);
+ failOnTragicEvent(e);
+ throw e;
} catch (EngineClosedException e) {
throw e;
} catch (Exception e) {
@@ -610,8 +670,8 @@ public class InternalEngine extends Engine {
indexWriter.flush();
}
} catch (AlreadyClosedException e) {
- ensureOpen();
- maybeFailEngine("writeIndexingBuffer", e);
+ failOnTragicEvent(e);
+ throw e;
} catch (EngineClosedException e) {
throw e;
} catch (Exception e) {
@@ -706,7 +766,7 @@ public class InternalEngine extends Engine {
flushLock.lock();
logger.trace("acquired flush lock after blocking");
} else {
- throw new FlushNotAllowedEngineException(shardId, "already flushing...");
+ return new CommitId(lastCommittedSegmentInfos.getId());
}
} else {
logger.trace("acquired flush lock immediately");
@@ -726,30 +786,30 @@ public class InternalEngine extends Engine {
} catch (Exception e) {
throw new FlushFailedEngineException(shardId, e);
}
- }
- /*
- * we have to inc-ref the store here since if the engine is closed by a tragic event
- * we don't acquire the write lock and wait until we have exclusive access. This might also
- * dec the store reference which can essentially close the store and unless we can inc the reference
- * we can't use it.
- */
- store.incRef();
- try {
- // reread the last committed segment infos
- lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
- } catch (Exception e) {
- if (isClosed.get() == false) {
- try {
- logger.warn("failed to read latest segment infos on flush", e);
- } catch (Exception inner) {
- e.addSuppressed(inner);
- }
- if (Lucene.isCorruptionException(e)) {
- throw new FlushFailedEngineException(shardId, e);
+ /*
+ * we have to inc-ref the store here since if the engine is closed by a tragic event
+ * we don't acquire the write lock and wait until we have exclusive access. This might also
+ * dec the store reference which can essentially close the store and unless we can inc the reference
+ * we can't use it.
+ */
+ store.incRef();
+ try {
+ // reread the last committed segment infos
+ lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
+ } catch (Exception e) {
+ if (isClosed.get() == false) {
+ try {
+ logger.warn("failed to read latest segment infos on flush", e);
+ } catch (Exception inner) {
+ e.addSuppressed(inner);
+ }
+ if (Lucene.isCorruptionException(e)) {
+ throw new FlushFailedEngineException(shardId, e);
+ }
}
+ } finally {
+ store.decRef();
}
- } finally {
- store.decRef();
}
newCommitId = lastCommittedSegmentInfos.getId();
} catch (FlushFailedEngineException ex) {
@@ -835,6 +895,14 @@ public class InternalEngine extends Engine {
} finally {
store.decRef();
}
+ } catch (AlreadyClosedException ex) {
+ /* in this case we first check if the engine is still open. If so this exception is just fine
+ * and expected. We don't hold any locks while we block on forceMerge otherwise it would block
+ * closing the engine as well. If we are not closed we pass it on to failOnTragicEvent which ensures
+ * we are handling a tragic event exception here */
+ ensureOpen();
+ failOnTragicEvent(ex);
+ throw ex;
} catch (Exception e) {
try {
maybeFailEngine("force merge", e);
@@ -869,26 +937,35 @@ public class InternalEngine extends Engine {
}
}
+ private void failOnTragicEvent(AlreadyClosedException ex) {
+ // if we are already closed due to some tragic exception
+ // we need to fail the engine. it might have already been failed before
+ // but we are double-checking it's failed and closed
+ if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
+ final Exception tragedy = indexWriter.getTragicException() instanceof Exception ?
+ (Exception) indexWriter.getTragicException() :
+ new Exception(indexWriter.getTragicException());
+ failEngine("already closed by tragic event on the index writer", tragedy);
+ } else if (translog.isOpen() == false && translog.getTragicException() != null) {
+ failEngine("already closed by tragic event on the translog", translog.getTragicException());
+ } else if (failedEngine.get() == null) { // we are closed but the engine is not failed yet?
+ // this smells like a bug - we only expect ACE if we are in a fatal case, i.e. either translog or IW is closed by
+ // a tragic event or has closed itself. If that is not the case we are in a buggy state and raise an assertion error
+ throw new AssertionError("Unexpected AlreadyClosedException", ex);
+ }
+ }
+
@Override
protected boolean maybeFailEngine(String source, Exception e) {
boolean shouldFail = super.maybeFailEngine(source, e);
if (shouldFail) {
return true;
}
-
- // Check for AlreadyClosedException
+ // Check for AlreadyClosedException -- ACE is a very special
+ // exception that should only be thrown in a tragic event. We pass on the checks to failOnTragicEvent which will
+ // throw an AssertionError if the tragic event condition is not met.
if (e instanceof AlreadyClosedException) {
- // if we are already closed due to some tragic exception
- // we need to fail the engine. it might have already been failed before
- // but we are double-checking it's failed and closed
- if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
- final Exception tragedy = indexWriter.getTragicException() instanceof Exception ?
- (Exception) indexWriter.getTragicException() :
- new Exception(indexWriter.getTragicException());
- failEngine("already closed by tragic event on the index writer", tragedy);
- } else if (translog.isOpen() == false && translog.getTragicException() != null) {
- failEngine("already closed by tragic event on the translog", translog.getTragicException());
- }
+ failOnTragicEvent((AlreadyClosedException)e);
return true;
} else if (e != null &&
((indexWriter.isOpen() == false && indexWriter.getTragicException() == e)
@@ -910,10 +987,12 @@ public class InternalEngine extends Engine {
protected final void writerSegmentStats(SegmentsStats stats) {
stats.addVersionMapMemoryInBytes(versionMap.ramBytesUsed());
stats.addIndexWriterMemoryInBytes(indexWriter.ramBytesUsed());
+ stats.updateMaxUnsafeAutoIdTimestamp(maxUnsafeAutoIdTimestamp.get());
}
@Override
public long getIndexBufferRAMBytesUsed() {
+ // We don't guard w/ readLock here, so we could throw AlreadyClosedException
return indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh();
}
@@ -963,8 +1042,9 @@ public class InternalEngine extends Engine {
logger.trace("rollback indexWriter");
try {
indexWriter.rollback();
- } catch (AlreadyClosedException e) {
- // ignore
+ } catch (AlreadyClosedException ex) {
+ failOnTragicEvent(ex);
+ throw ex;
}
logger.trace("rollback indexWriter done");
} catch (Exception e) {
@@ -990,6 +1070,7 @@ public class InternalEngine extends Engine {
}
private long loadCurrentVersionFromIndex(Term uid) throws IOException {
+ assert incrementIndexVersionLookup();
try (final Searcher searcher = acquireSearcher("load_version")) {
return Versions.loadVersion(searcher.reader(), uid);
}
@@ -1015,7 +1096,7 @@ public class InternalEngine extends Engine {
mergePolicy = new ElasticsearchMergePolicy(mergePolicy);
iwc.setMergePolicy(mergePolicy);
iwc.setSimilarity(engineConfig.getSimilarity());
- iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac());
+ iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
iwc.setCodec(engineConfig.getCodec());
iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
return new IndexWriter(store.directory(), iwc);
@@ -1028,10 +1109,10 @@ public class InternalEngine extends Engine {
/** Extended SearcherFactory that warms the segments if needed when acquiring a new searcher */
static final class SearchFactory extends EngineSearcherFactory {
private final Engine.Warmer warmer;
- private final ESLogger logger;
+ private final Logger logger;
private final AtomicBoolean isEngineClosed;
- SearchFactory(ESLogger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) {
+ SearchFactory(Logger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) {
super(engineConfig);
warmer = engineConfig.getWarmer();
this.logger = logger;
@@ -1207,8 +1288,8 @@ public class InternalEngine extends Engine {
// if we are in this stage we have to prevent flushes from this
// engine otherwise we might lose documents if the flush succeeds
// and the translog recovery fails when we "commit" the translog on flush.
- if (allowCommits.get() == false) {
- throw new FlushNotAllowedEngineException(shardId, "flushes are disabled - pending translog recovery");
+ if (pendingTranslogRecovery.get()) {
+ throw new IllegalStateException(shardId.toString() + " flushes are disabled - pending translog recovery");
}
}
@@ -1216,6 +1297,12 @@ public class InternalEngine extends Engine {
mergeScheduler.refreshConfig();
// config().isEnableGcDeletes() or config.getGcDeletesInMillis() may have changed:
maybePruneDeletedTombstones();
+ if (engineConfig.getMaxUnsafeAutoIdTimestamp() == Long.MAX_VALUE) {
+ // this is an anti-viral setting: you can only opt out for the entire index.
+ // if it's set to true, the setting will only be re-interpreted when a shard
+ // starts up again due to relocation or when the index is closed and reopened
+ this.maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE);
+ }
}
public MergeStats getMergeStats() {
@@ -1228,4 +1315,44 @@ public class InternalEngine extends Engine {
final int maxDoc = indexWriter.maxDoc();
return new DocsStats(numDocs, maxDoc-numDocs);
}
+
+
+ /**
+ * Returns the number of times a version was looked up from the index.
+ * Note this is only available if assertions are enabled.
+ */
+ long getNumIndexVersionsLookups() { // for testing
+ return numIndexVersionsLookups.count();
+ }
+
+ /**
+ * Returns the number of times a version was looked up either from memory or from the index.
+ * Note this is only available if assertions are enabled.
+ */
+ long getNumVersionLookups() { // for testing
+ return numVersionLookups.count();
+ }
+
+ private boolean incrementVersionLookup() { // only used by asserts
+ numVersionLookups.inc();
+ return true;
+ }
+
+ private boolean incrementIndexVersionLookup() {
+ numIndexVersionsLookups.inc();
+ return true;
+ }
+
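Both helpers return true so they can be invoked as `assert incrementVersionLookup();`: under `java -ea` the counter ticks, while in production the whole statement compiles away. A standalone sketch of the idiom:

import java.util.concurrent.atomic.AtomicLong;

class AssertOnlyCounter {
    private final AtomicLong hits = new AtomicLong();

    // Always returns true so it can ride on an assert statement.
    private boolean count() {
        hits.incrementAndGet();
        return true;
    }

    void doWork() {
        assert count(); // counted only when assertions are enabled
        // ... real work ...
    }
}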
+ /**
+ * Returns <code>true</code> iff the index writer has any deletions either buffered in memory or
+ * in the index.
+ */
+ boolean indexWriterHasDeletions() {
+ return indexWriter.hasDeletions();
+ }
+
+ @Override
+ public boolean isRecovering() {
+ return pendingTranslogRecovery.get();
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
index 3cf3c83749..b489cec576 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
@@ -42,7 +42,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
// Used while refresh is running, and to hold adds/deletes until refresh finishes. We read from both current and old on lookup:
final Map<BytesRef,VersionValue> old;
-
+
public Maps(Map<BytesRef,VersionValue> current, Map<BytesRef,VersionValue> old) {
this.current = current;
this.old = old;
@@ -59,26 +59,37 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
private volatile Maps maps = new Maps();
- private ReferenceManager mgr;
+ private ReferenceManager<?> mgr;
/** Bytes consumed for each BytesRef UID:
- *
- * NUM_BYTES_OBJECT_HEADER + 2*NUM_BYTES_INT + NUM_BYTES_OBJECT_REF + NUM_BYTES_ARRAY_HEADER [ + bytes.length] */
- private static final int BASE_BYTES_PER_BYTESREF = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER +
- 2*Integer.BYTES +
- RamUsageEstimator.NUM_BYTES_OBJECT_REF +
- RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
-
- /** Bytes used by having CHM point to a key/value:
- *
- * CHM.Entry:
- * + NUM_BYTES_OBJECT_HEADER + 3*NUM_BYTES_OBJECT_REF + NUM_BYTES_INT
- *
- * CHM's pointer to CHM.Entry, double for approx load factor:
- * + 2*NUM_BYTES_OBJECT_REF */
- private static final int BASE_BYTES_PER_CHM_ENTRY = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER +
- Integer.BYTES +
- 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+ * In this base value, we account for the {@link BytesRef} object itself as
+ * well as the header of the byte[] array it holds, and some lost bytes due
+ * to object alignment. So consumers of this constant just have to add the
+ * length of the byte[] (assuming it is not shared between multiple
+ * instances). */
+ private static final long BASE_BYTES_PER_BYTESREF =
+ // shallow memory usage of the BytesRef object
+ RamUsageEstimator.shallowSizeOfInstance(BytesRef.class) +
+ // header of the byte[] array
+ RamUsageEstimator.NUM_BYTES_ARRAY_HEADER +
+ // with an alignment size (-XX:ObjectAlignmentInBytes) of 8 (default),
+ // there could be between 0 and 7 lost bytes, so we account for 3
+ // lost bytes on average
+ 3;
+
+ /** Bytes used by having CHM point to a key/value. */
+ private static final long BASE_BYTES_PER_CHM_ENTRY;
+ static {
+ // use the same map impl as the Maps class does
+ Map<Integer, Integer> map = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
+ map.put(0, 0);
+ long chmEntryShallowSize = RamUsageEstimator.shallowSizeOf(map.entrySet().iterator().next());
+ // assume a load factor of 50%
+ // for each entry, we need two object refs, one for the entry itself
+ // and one for the free space that is due to the fact that hash tables
+ // cannot be fully loaded
+ BASE_BYTES_PER_CHM_ENTRY = chmEntryShallowSize + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+ }
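On a typical 64-bit JVM with compressed oops and 8-byte alignment, the uid constant works out to roughly 24 (shallow BytesRef) + 16 (array header) + 3 ≈ 43 bytes before the uid payload itself, though the exact figures depend on the JVM. A hedged way to check the arithmetic on a given runtime (the sample uid string is illustrative):

import java.nio.charset.StandardCharsets;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;

// Illustrative: report what the estimator says on this JVM.
public class UidRamMath {
    public static void main(String[] args) {
        long shallow = RamUsageEstimator.shallowSizeOfInstance(BytesRef.class);
        long arrayHeader = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
        long base = shallow + arrayHeader + 3; // +3 = average alignment loss
        byte[] uid = "AVfdRWZoKLKvgXVy5V9V".getBytes(StandardCharsets.UTF_8); // sample auto-ID
        System.out.println("base=" + base + ", with payload=" + (base + uid.length));
    }
}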
/** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account
* for the CHM entry here, and account for BytesRef/VersionValue against the tombstones, since refresh would not clear this RAM. */
@@ -88,7 +99,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
final AtomicLong ramBytesUsedTombstones = new AtomicLong();
/** Sync'd because we replace old mgr. */
- synchronized void setManager(ReferenceManager newMgr) {
+ synchronized void setManager(ReferenceManager<?> newMgr) {
if (mgr != null) {
mgr.removeListener(this);
}
@@ -146,7 +157,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
/** Adds this uid/version to the pending adds map. */
void putUnderLock(BytesRef uid, VersionValue version) {
-
+ assert uid.bytes.length == uid.length : "Oversized _uid! UID length: " + uid.length + ", bytes length: " + uid.bytes.length;
long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length;
final VersionValue prev = maps.current.put(uid, version);
@@ -245,7 +256,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
return ramBytesUsedCurrent.get() + ramBytesUsedTombstones.get();
}
- /** Returns how much RAM would be freed up by refreshing. This is {@link ramBytesUsed} except does not include tombstones because they
+ /** Returns how much RAM would be freed up by refreshing. This is {@link #ramBytesUsed} except it does not include tombstones because they
* don't clear on refresh. */
long ramBytesUsedForRefresh() {
return ramBytesUsedCurrent.get();
diff --git a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
index 8f0388aef0..637beebfec 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
@@ -44,6 +44,7 @@ public class SegmentsStats implements Streamable, ToXContent {
private long docValuesMemoryInBytes;
private long indexWriterMemoryInBytes;
private long versionMapMemoryInBytes;
+ private long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
private long bitsetMemoryInBytes;
private ImmutableOpenMap<String, Long> fileSizes = ImmutableOpenMap.of();
@@ -114,6 +115,10 @@ public class SegmentsStats implements Streamable, ToXContent {
this.versionMapMemoryInBytes += versionMapMemoryInBytes;
}
+ void updateMaxUnsafeAutoIdTimestamp(long maxUnsafeAutoIdTimestamp) {
+ this.maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, this.maxUnsafeAutoIdTimestamp);
+ }
+
public void addBitsetMemoryInBytes(long bitsetMemoryInBytes) {
this.bitsetMemoryInBytes += bitsetMemoryInBytes;
}
@@ -138,6 +143,7 @@ public class SegmentsStats implements Streamable, ToXContent {
if (mergeStats == null) {
return;
}
+ updateMaxUnsafeAutoIdTimestamp(mergeStats.maxUnsafeAutoIdTimestamp);
add(mergeStats.count, mergeStats.memoryInBytes);
addTermsMemoryInBytes(mergeStats.termsMemoryInBytes);
addStoredFieldsMemoryInBytes(mergeStats.storedFieldsMemoryInBytes);
@@ -272,6 +278,14 @@ public class SegmentsStats implements Streamable, ToXContent {
return fileSizes;
}
+ /**
+ * Returns the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine.
+ * This is used to ensure we don't add duplicate documents when we assume an append-only case based on auto-generated IDs.
+ */
+ public long getMaxUnsafeAutoIdTimestamp() {
+ return maxUnsafeAutoIdTimestamp;
+ }
+
public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException {
SegmentsStats stats = new SegmentsStats();
stats.readFrom(in);
@@ -292,6 +306,7 @@ public class SegmentsStats implements Streamable, ToXContent {
builder.byteSizeField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, indexWriterMemoryInBytes);
builder.byteSizeField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, versionMapMemoryInBytes);
builder.byteSizeField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, bitsetMemoryInBytes);
+ builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp);
builder.startObject(Fields.FILE_SIZES);
for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
ObjectObjectCursor<String, Long> entry = it.next();
@@ -326,6 +341,7 @@ public class SegmentsStats implements Streamable, ToXContent {
static final String INDEX_WRITER_MEMORY_IN_BYTES = "index_writer_memory_in_bytes";
static final String VERSION_MAP_MEMORY = "version_map_memory";
static final String VERSION_MAP_MEMORY_IN_BYTES = "version_map_memory_in_bytes";
+ static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP = "max_unsafe_auto_id_timestamp";
static final String FIXED_BIT_SET = "fixed_bit_set";
static final String FIXED_BIT_SET_MEMORY_IN_BYTES = "fixed_bit_set_memory_in_bytes";
static final String FILE_SIZES = "file_sizes";
@@ -347,6 +363,7 @@ public class SegmentsStats implements Streamable, ToXContent {
indexWriterMemoryInBytes = in.readLong();
versionMapMemoryInBytes = in.readLong();
bitsetMemoryInBytes = in.readLong();
+ maxUnsafeAutoIdTimestamp = in.readLong();
int size = in.readVInt();
ImmutableOpenMap.Builder<String, Long> map = ImmutableOpenMap.builder(size);
@@ -371,6 +388,7 @@ public class SegmentsStats implements Streamable, ToXContent {
out.writeLong(indexWriterMemoryInBytes);
out.writeLong(versionMapMemoryInBytes);
out.writeLong(bitsetMemoryInBytes);
+ out.writeLong(maxUnsafeAutoIdTimestamp);
out.writeVInt(fileSizes.size());
for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java
index 2d5a134493..3aafcaff74 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java
@@ -106,7 +106,7 @@ public class ShadowEngine extends Engine {
@Override
- public boolean index(Index index) throws EngineException {
+ public void index(Index index) throws EngineException {
throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine");
}
@@ -191,7 +191,8 @@ public class ShadowEngine extends Engine {
ensureOpen();
searcherManager.maybeRefreshBlocking();
} catch (AlreadyClosedException e) {
- ensureOpen();
+ // This means there's a bug somewhere: don't suppress it
+ throw new AssertionError(e);
} catch (EngineClosedException e) {
throw e;
} catch (Exception e) {
diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java
index 20116a47b1..662c88df5d 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java
@@ -21,19 +21,18 @@ package org.elasticsearch.index.engine;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
-import org.elasticsearch.index.translog.Translog;
import java.util.Collection;
import java.util.Collections;
class VersionValue implements Accountable {
+ private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(VersionValue.class);
+
private final long version;
- private final Translog.Location translogLocation;
- public VersionValue(long version, Translog.Location translogLocation) {
+ public VersionValue(long version) {
this.version = version;
- this.translogLocation = translogLocation;
}
public long time() {
@@ -48,14 +47,10 @@ class VersionValue implements Accountable {
return false;
}
- public Translog.Location translogLocation() {
- return this.translogLocation;
- }
@Override
public long ramBytesUsed() {
- return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Long.BYTES + RamUsageEstimator.NUM_BYTES_OBJECT_REF +
- (translogLocation != null ? translogLocation.size : 0);
+ return BASE_RAM_BYTES_USED;
}
@Override
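With the Translog.Location field gone, a VersionValue has a fixed shallow footprint, so ramBytesUsed() no longer needs per-instance arithmetic. A sketch of the pattern, assuming Lucene's RamUsageEstimator (a real Lucene utility) is on the classpath:

    import org.apache.lucene.util.RamUsageEstimator;

    final class ShallowSizeDemo {
        // Shallow size = object header + declared fields, excluding referenced objects;
        // it is a per-class constant, so compute it once instead of on every call.
        private static final long BASE_RAM_BYTES_USED =
                RamUsageEstimator.shallowSizeOfInstance(ShallowSizeDemo.class);

        private final long version;

        ShallowSizeDemo(long version) { this.version = version; }

        long ramBytesUsed() { return BASE_RAM_BYTES_USED; }

        public static void main(String[] args) {
            System.out.println(new ShallowSizeDemo(1L).ramBytesUsed()); // e.g. 24 on most JVMs
        }
    }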
diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java
index 2fa4476c0d..aaecf2fa89 100644
--- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.fielddata.ordinals;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiDocValues.OrdinalMap;
@@ -26,7 +27,6 @@ import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.packed.PackedInts;
import org.elasticsearch.common.breaker.CircuitBreaker;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
@@ -48,7 +48,7 @@ public enum GlobalOrdinalsBuilder {
/**
* Build global ordinals for the provided {@link IndexReader}.
*/
- public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, IndexSettings indexSettings, CircuitBreakerService breakerService, ESLogger logger) throws IOException {
+ public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger) throws IOException {
assert indexReader.leaves().size() > 1;
long startTimeNS = System.nanoTime();
diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java
new file mode 100644
index 0000000000..7ce6eb9588
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.document.LatLonDocValuesField;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.search.MultiValueMode;
+
+import java.io.IOException;
+
+public abstract class AbstractLatLonPointDVIndexFieldData extends DocValuesIndexFieldData
+ implements IndexGeoPointFieldData {
+ AbstractLatLonPointDVIndexFieldData(Index index, String fieldName) {
+ super(index, fieldName);
+ }
+
+ @Override
+ public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode,
+ XFieldComparatorSource.Nested nested) {
+ throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
+ }
+
+ public static class LatLonPointDVIndexFieldData extends AbstractLatLonPointDVIndexFieldData {
+ public LatLonPointDVIndexFieldData(Index index, String fieldName) {
+ super(index, fieldName);
+ }
+
+ @Override
+ public AtomicGeoPointFieldData load(LeafReaderContext context) {
+ try {
+ LeafReader reader = context.reader();
+ FieldInfo info = reader.getFieldInfos().fieldInfo(fieldName);
+ if (info != null) {
+ checkCompatible(info);
+ }
+ return new LatLonPointDVAtomicFieldData(DocValues.getSortedNumeric(reader, fieldName));
+ } catch (IOException e) {
+ throw new IllegalStateException("Cannot load doc values", e);
+ }
+ }
+
+ @Override
+ public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
+ return load(context);
+ }
+
+ /** helper: checks a FieldInfo and throws an exception if it's definitely not a LatLonDocValuesField */
+ static void checkCompatible(FieldInfo fieldInfo) {
+ // doc-values properties could be "unset" if, e.g., only a StoredField with this same name was used in the segment.
+ if (fieldInfo.getDocValuesType() != DocValuesType.NONE
+ && fieldInfo.getDocValuesType() != LatLonDocValuesField.TYPE.docValuesType()) {
+ throw new IllegalArgumentException("field=\"" + fieldInfo.name + "\" was indexed with docValuesType="
+ + fieldInfo.getDocValuesType() + " but this type has docValuesType="
+ + LatLonDocValuesField.TYPE.docValuesType() + ", is the field really a LatLonDocValuesField?");
+ }
+ }
+ }
+
+ public static class Builder implements IndexFieldData.Builder {
+ @Override
+ public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService, MapperService mapperService) {
+ // ignore breaker
+ return new LatLonPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name());
+ }
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
index 83bdaf221b..4621876399 100644
--- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
+++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
@@ -19,10 +19,9 @@
package org.elasticsearch.index.fielddata.plain;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexReader;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.IndexFieldData;
@@ -34,7 +33,6 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
-import java.util.Map;
import java.util.Set;
import static java.util.Collections.unmodifiableSet;
@@ -45,7 +43,7 @@ public abstract class DocValuesIndexFieldData {
protected final Index index;
protected final String fieldName;
- protected final ESLogger logger;
+ protected final Logger logger;
public DocValuesIndexFieldData(Index index, String fieldName) {
super();
diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/LatLonPointDVAtomicFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/LatLonPointDVAtomicFieldData.java
new file mode 100644
index 0000000000..d11a79c255
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/LatLonPointDVAtomicFieldData.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.geo.GeoEncodingUtils;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.index.fielddata.MultiGeoPointValues;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+
+final class LatLonPointDVAtomicFieldData extends AbstractAtomicGeoPointFieldData {
+ private final SortedNumericDocValues values;
+
+ LatLonPointDVAtomicFieldData(SortedNumericDocValues values) {
+ super();
+ this.values = values;
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return 0; // not exposed by Lucene
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public void close() {
+ // noop
+ }
+
+ @Override
+ public MultiGeoPointValues getGeoPointValues() {
+ return new MultiGeoPointValues() {
+ GeoPoint[] points = new GeoPoint[0];
+ private int count = 0;
+
+ @Override
+ public void setDocument(int docId) {
+ values.setDocument(docId);
+ count = values.count();
+ if (count > points.length) {
+ final int previousLength = points.length;
+ points = Arrays.copyOf(points, ArrayUtil.oversize(count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+ for (int i = previousLength; i < points.length; ++i) {
+ points[i] = new GeoPoint(Double.NaN, Double.NaN);
+ }
+ }
+ long encoded;
+ for (int i = 0; i < count; ++i) {
+ encoded = values.valueAt(i);
+ points[i].reset(GeoEncodingUtils.decodeLatitude((int)(encoded >>> 32)), GeoEncodingUtils.decodeLongitude((int)encoded));
+ }
+ }
+
+ @Override
+ public int count() {
+ return count;
+ }
+
+ @Override
+ public GeoPoint valueAt(int index) {
+ return points[index];
+ }
+ };
+ }
+}
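Each doc-values long consumed in setDocument() above packs both coordinates: latitude bits in the upper 32 bits, longitude bits in the lower 32. decodeLatitude/decodeLongitude (real Lucene GeoEncodingUtils methods) map those ints back to degrees; the bit splitting itself can be shown with plain Java and a hypothetical pack() helper:

    // Hypothetical stand-in for Lucene's packed doc-values layout: latitude bits in the
    // upper 32 bits, longitude bits in the lower 32. The real int<->degrees mapping lives
    // in org.apache.lucene.geo.GeoEncodingUtils; only the bit splitting is shown here.
    public class PackedLatLonDemo {
        static long pack(int latBits, int lonBits) {
            return (((long) latBits) << 32) | (lonBits & 0xFFFFFFFFL);
        }

        public static void main(String[] args) {
            long encoded = pack(123456789, -987654321);
            int latBits = (int) (encoded >>> 32);   // same expression as in setDocument()
            int lonBits = (int) encoded;
            System.out.println(latBits + " " + lonBits); // 123456789 -987654321
        }
    }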
diff --git a/core/src/main/java/org/elasticsearch/index/get/GetResult.java b/core/src/main/java/org/elasticsearch/index/get/GetResult.java
index 0fa843adc4..b688ed4423 100644
--- a/core/src/main/java/org/elasticsearch/index/get/GetResult.java
+++ b/core/src/main/java/org/elasticsearch/index/get/GetResult.java
@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.search.lookup.SourceLookup;
import java.io.IOException;
@@ -229,7 +230,7 @@ public class GetResult implements Streamable, Iterable<GetField>, ToXContent {
builder.field(Fields.FOUND, exists);
if (source != null) {
- XContentHelper.writeRawField("_source", source, builder, params);
+ XContentHelper.writeRawField(SourceFieldMapper.NAME, source, builder, params);
}
if (!otherFields.isEmpty()) {
diff --git a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java
index b3676b984c..c49fb2c3cc 100644
--- a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java
+++ b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java
@@ -41,28 +41,19 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParentFieldMapper;
-import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
-import org.elasticsearch.index.mapper.TTLFieldMapper;
-import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.ParentFieldSubFetchPhase;
-import org.elasticsearch.search.lookup.LeafSearchLookup;
-import org.elasticsearch.search.lookup.SearchLookup;
import java.io.IOException;
-import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
@@ -85,12 +76,11 @@ public final class ShardGetService extends AbstractIndexShardComponent {
return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
}
-
- public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
+ public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
currentMetric.inc();
try {
long now = System.nanoTime();
- GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext, ignoreErrorsOnGeneratedFields);
+ GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext);
if (getResult.isExists()) {
existsMetric.inc(System.nanoTime() - now);
@@ -149,7 +139,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
return FetchSourceContext.DO_NOT_FETCH_SOURCE;
}
- private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
+ private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields);
Engine.GetResult get = null;
@@ -182,140 +172,12 @@ public final class ShardGetService extends AbstractIndexShardComponent {
try {
// break between having loaded it from translog (so we only have _source), and having a document to load
- if (get.docIdAndVersion() != null) {
- return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, mapperService);
- } else {
- Translog.Source source = get.source();
-
- Map<String, GetField> fields = null;
- SearchLookup searchLookup = null;
-
- // we can only load scripts that can run against the source
- Set<String> neededFields = new HashSet<>();
- // add meta fields
- neededFields.add(RoutingFieldMapper.NAME);
- DocumentMapper docMapper = mapperService.documentMapper(type);
- if (docMapper.parentFieldMapper().active()) {
- neededFields.add(ParentFieldMapper.NAME);
- }
- if (docMapper.timestampFieldMapper().enabled()) {
- neededFields.add(TimestampFieldMapper.NAME);
- }
- if (docMapper.TTLFieldMapper().enabled()) {
- neededFields.add(TTLFieldMapper.NAME);
- }
- // add requested fields
- if (gFields != null) {
- neededFields.addAll(Arrays.asList(gFields));
- }
- for (String field : neededFields) {
- if (SourceFieldMapper.NAME.equals(field)) {
- // dealt with when normalizing fetchSourceContext.
- continue;
- }
- Object value = null;
- if (field.equals(RoutingFieldMapper.NAME)) {
- value = source.routing;
- } else if (field.equals(ParentFieldMapper.NAME) && docMapper.parentFieldMapper().active()) {
- value = source.parent;
- } else if (field.equals(TimestampFieldMapper.NAME) && docMapper.timestampFieldMapper().enabled()) {
- value = source.timestamp;
- } else if (field.equals(TTLFieldMapper.NAME) && docMapper.TTLFieldMapper().enabled()) {
- // Call value for search with timestamp + ttl here to display the live remaining ttl value and be consistent with the search result display
- if (source.ttl > 0) {
- value = docMapper.TTLFieldMapper().valueForSearch(source.timestamp + source.ttl);
- }
- } else {
- if (searchLookup == null) {
- searchLookup = new SearchLookup(mapperService, null, new String[]{type});
- searchLookup.source().setSource(source.source);
- }
-
- FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
- if (fieldMapper == null) {
- if (docMapper.objectMappers().get(field) != null) {
- // Only fail if we know it is a object field, missing paths / fields shouldn't fail.
- throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
- }
- } else if (shouldGetFromSource(ignoreErrorsOnGeneratedFields, docMapper, fieldMapper)) {
- List<Object> values = searchLookup.source().extractRawValues(field);
- if (!values.isEmpty()) {
- value = values;
- }
-
- }
- }
- if (value != null) {
- if (fields == null) {
- fields = new HashMap<>(2);
- }
- if (value instanceof List) {
- fields.put(field, new GetField(field, (List) value));
- } else {
- fields.put(field, new GetField(field, Collections.singletonList(value)));
- }
- }
- }
-
- // deal with source, but only if it's enabled (we always have it from the translog)
- BytesReference sourceToBeReturned = null;
- SourceFieldMapper sourceFieldMapper = docMapper.sourceMapper();
- if (fetchSourceContext.fetchSource() && sourceFieldMapper.enabled()) {
-
- sourceToBeReturned = source.source;
-
- // Cater for source excludes/includes at the cost of performance
- // We must first apply the field mapper filtering to make sure we get correct results
- // in the case that the fetchSourceContext white lists something that's not included by the field mapper
-
- boolean sourceFieldFiltering = sourceFieldMapper.includes().length > 0 || sourceFieldMapper.excludes().length > 0;
- boolean sourceFetchFiltering = fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0;
- if (sourceFieldFiltering || sourceFetchFiltering) {
- // TODO: The source might parsed and available in the sourceLookup but that one uses unordered maps so different. Do we care?
- Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source.source, true);
- XContentType sourceContentType = typeMapTuple.v1();
- Map<String, Object> sourceAsMap = typeMapTuple.v2();
- if (sourceFieldFiltering) {
- sourceAsMap = XContentMapValues.filter(sourceAsMap, sourceFieldMapper.includes(), sourceFieldMapper.excludes());
- }
- if (sourceFetchFiltering) {
- sourceAsMap = XContentMapValues.filter(sourceAsMap, fetchSourceContext.includes(), fetchSourceContext.excludes());
- }
- try {
- sourceToBeReturned = XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap).bytes();
- } catch (IOException e) {
- throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
- }
- }
- }
-
- return new GetResult(shardId.getIndexName(), type, id, get.version(), get.exists(), sourceToBeReturned, fields);
- }
+ return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, mapperService);
} finally {
get.release();
}
}
- protected boolean shouldGetFromSource(boolean ignoreErrorsOnGeneratedFields, DocumentMapper docMapper, FieldMapper fieldMapper) {
- if (!fieldMapper.isGenerated()) {
- //if the field is always there we check if either source mapper is enabled, in which case we get the field
- // from source, or, if the field is stored, in which case we have to get if from source here also (we are in the translog phase, doc not indexed yet, we annot access the stored fields)
- return docMapper.sourceMapper().enabled() || fieldMapper.fieldType().stored();
- } else {
- if (!fieldMapper.fieldType().stored()) {
- //if it is not stored, user will not get the generated field back
- return false;
- } else {
- if (ignoreErrorsOnGeneratedFields) {
- return false;
- } else {
- throw new ElasticsearchException("Cannot access field " + fieldMapper.name() + " from transaction log. You can only get this field after refresh() has been called.");
- }
- }
-
- }
- }
-
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
Map<String, GetField> fields = null;
BytesReference source = null;
@@ -347,41 +209,14 @@ public final class ShardGetService extends AbstractIndexShardComponent {
fields.put(ParentFieldMapper.NAME, new GetField(ParentFieldMapper.NAME, Collections.singletonList(parentId)));
}
- // now, go and do the script thingy if needed
-
if (gFields != null && gFields.length > 0) {
- SearchLookup searchLookup = null;
for (String field : gFields) {
- Object value = null;
FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
if (fieldMapper == null) {
if (docMapper.objectMappers().get(field) != null) {
// Only fail if we know it is an object field, missing paths / fields shouldn't fail.
throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
}
- } else if (!fieldMapper.fieldType().stored() && !fieldMapper.isGenerated()) {
- if (searchLookup == null) {
- searchLookup = new SearchLookup(mapperService, null, new String[]{type});
- LeafSearchLookup leafSearchLookup = searchLookup.getLeafSearchLookup(docIdAndVersion.context);
- searchLookup.source().setSource(source);
- leafSearchLookup.setDocument(docIdAndVersion.docId);
- }
-
- List<Object> values = searchLookup.source().extractRawValues(field);
- if (values.isEmpty() == false) {
- value = values;
- }
- }
-
- if (value != null) {
- if (fields == null) {
- fields = new HashMap<>(2);
- }
- if (value instanceof List) {
- fields.put(field, new GetField(field, (List) value));
- } else {
- fields.put(field, new GetField(field, Collections.singletonList(value)));
- }
}
}
}
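The deletions above remove the entire translog-reconstruction branch from innerGet: routing/parent/timestamp/ttl extraction, hand-rolled source filtering, and the shouldGetFromSource special-casing for generated fields all go away, along with the ignoreErrorsOnGeneratedFields flag in the public get() signature. Every realtime hit now resolves through innerGetLoadFromStoredFields. A minimal mini-model of the collapsed control flow (hypothetical types, not the ES classes):

    // Hypothetical mini-model of the change: one code path instead of two.
    public class GetPathDemo {
        static class GetResult {
            final boolean exists;
            GetResult(boolean exists) { this.exists = exists; }
        }

        static GetResult innerGet(boolean found) {
            if (!found) {
                return new GetResult(false);
            }
            // before: if (docIdAndVersion != null) { stored fields } else { rebuild from translog source }
            // after: always load from stored fields
            return loadFromStoredFields();
        }

        static GetResult loadFromStoredFields() { return new GetResult(true); }

        public static void main(String[] args) {
            System.out.println(innerGet(true).exists);  // true
            System.out.println(innerGet(false).exists); // false
        }
    }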
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java
index 05d94ff919..6c1477d8d0 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java
@@ -47,28 +47,6 @@ import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;
*/
public class AllFieldMapper extends MetadataFieldMapper {
- public interface IncludeInAll {
-
- /**
- * If {@code includeInAll} is not null then return a copy of this mapper
- * that will include values in the _all field according to {@code includeInAll}.
- */
- Mapper includeInAll(Boolean includeInAll);
-
- /**
- * If {@code includeInAll} is not null and not set on this mapper yet, then
- * return a copy of this mapper that will include values in the _all field
- * according to {@code includeInAll}.
- */
- Mapper includeInAllIfNotSet(Boolean includeInAll);
-
- /**
- * If {@code includeInAll} was already set on this mapper then return a copy
- * of this mapper that has {@code includeInAll} not set.
- */
- Mapper unsetIncludeInAll();
- }
-
public static final String NAME = "_all";
public static final String CONTENT_TYPE = "_all";
@@ -128,9 +106,9 @@ public class AllFieldMapper extends MetadataFieldMapper {
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException {
Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
- builder.fieldType().setIndexAnalyzer(parserContext.analysisService().defaultIndexAnalyzer());
- builder.fieldType().setSearchAnalyzer(parserContext.analysisService().defaultSearchAnalyzer());
- builder.fieldType().setSearchQuoteAnalyzer(parserContext.analysisService().defaultSearchQuoteAnalyzer());
+ builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
+ builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
+ builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
// parseField below will happily parse the doc_values setting, but it is then never passed to
// the AllFieldMapper ctor in the builder since it is not valid. Here we validate
@@ -313,8 +291,4 @@ public class AllFieldMapper extends MetadataFieldMapper {
super.doMerge(mergeWith, updateAllTypes);
}
- @Override
- public boolean isGenerated() {
- return true;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java
index 99dd39ef2e..fa82176c6a 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java
@@ -89,7 +89,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
protected Boolean ignoreMalformed;
- public Builder(String name, GeoPointFieldType fieldType) {
+ public Builder(String name, MappedFieldType fieldType) {
super(name, fieldType, fieldType);
}
@@ -143,7 +143,16 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
public Y build(Mapper.BuilderContext context) {
- GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;
+ // version 5.0 cuts over to LatLonPoint and no longer indexes geohash or lat/lon separately
+ if (context.indexCreatedVersion().before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ return buildLegacy(context);
+ }
+ return build(context, name, fieldType, defaultFieldType, context.indexSettings(),
+ null, null, null, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
+ }
+
+ private Y buildLegacy(Mapper.BuilderContext context) {
+ LegacyGeoPointFieldType geoPointFieldType = (LegacyGeoPointFieldType)fieldType;
FieldMapper latMapper = null;
FieldMapper lonMapper = null;
@@ -161,9 +170,9 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
lonMapper = (LegacyDoubleFieldMapper) lonMapperBuilder.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
} else {
latMapper = new NumberFieldMapper.Builder(Names.LAT, NumberFieldMapper.NumberType.DOUBLE)
- .includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
+ .includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
lonMapper = new NumberFieldMapper.Builder(Names.LON, NumberFieldMapper.NumberType.DOUBLE)
- .includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
+ .includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
}
geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
}
@@ -183,7 +192,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
context.path().remove();
return build(context, name, fieldType, defaultFieldType, context.indexSettings(),
- latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
+ latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
}
}
@@ -191,8 +200,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder;
- if (parserContext.indexVersionCreated().before(Version.V_2_2_0)) {
- builder = new GeoPointFieldMapperLegacy.Builder(name);
+ Version indexVersionCreated = parserContext.indexVersionCreated();
+ if (indexVersionCreated.before(Version.V_2_2_0)) {
+ builder = new LegacyGeoPointFieldMapper.Builder(name);
+ } else if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ builder = new LatLonPointFieldMapper.Builder(name);
} else {
builder = new GeoPointFieldMapper.Builder(name);
}
@@ -202,47 +214,73 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
Map.Entry<String, Object> entry = iterator.next();
String propName = entry.getKey();
Object propNode = entry.getValue();
- if (propName.equals("lat_lon")) {
- deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed "
- + "in the next major release");
- builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode));
- iterator.remove();
- } else if (propName.equals("precision_step")) {
- deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed "
- + "in the next major release");
- builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode));
- iterator.remove();
- } else if (propName.equals("geohash")) {
- builder.enableGeoHash(XContentMapValues.lenientNodeBooleanValue(propNode));
- iterator.remove();
- } else if (propName.equals("geohash_prefix")) {
- builder.geoHashPrefix(XContentMapValues.lenientNodeBooleanValue(propNode));
- if (XContentMapValues.lenientNodeBooleanValue(propNode)) {
- builder.enableGeoHash(true);
- }
- iterator.remove();
- } else if (propName.equals("geohash_precision")) {
- if (propNode instanceof Integer) {
- builder.geoHashPrecision(XContentMapValues.nodeIntegerValue(propNode));
- } else {
- builder.geoHashPrecision(GeoUtils.geoHashLevelsForPrecision(propNode.toString()));
+ if (indexVersionCreated.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ if (propName.equals("lat_lon")) {
+ deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed "
+ + "in the next major release");
+ builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode));
+ iterator.remove();
+ } else if (propName.equals("precision_step")) {
+ deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed "
+ + "in the next major release");
+ builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode));
+ iterator.remove();
+ } else if (propName.equals("geohash")) {
+ deprecationLogger.deprecated(CONTENT_TYPE + " geohash parameter is deprecated and will be removed "
+ + "in the next major release");
+ builder.enableGeoHash(XContentMapValues.lenientNodeBooleanValue(propNode));
+ iterator.remove();
+ } else if (propName.equals("geohash_prefix")) {
+ deprecationLogger.deprecated(CONTENT_TYPE + " geohash_prefix parameter is deprecated and will be removed "
+ + "in the next major release");
+ builder.geoHashPrefix(XContentMapValues.lenientNodeBooleanValue(propNode));
+ if (XContentMapValues.lenientNodeBooleanValue(propNode)) {
+ builder.enableGeoHash(true);
+ }
+ iterator.remove();
+ } else if (propName.equals("geohash_precision")) {
+ deprecationLogger.deprecated(CONTENT_TYPE + " geohash_precision parameter is deprecated and will be removed "
+ + "in the next major release");
+ if (propNode instanceof Integer) {
+ builder.geoHashPrecision(XContentMapValues.nodeIntegerValue(propNode));
+ } else {
+ builder.geoHashPrecision(GeoUtils.geoHashLevelsForPrecision(propNode.toString()));
+ }
+ iterator.remove();
}
- iterator.remove();
- } else if (propName.equals(Names.IGNORE_MALFORMED)) {
+ }
+
+ if (propName.equals(Names.IGNORE_MALFORMED)) {
builder.ignoreMalformed(XContentMapValues.lenientNodeBooleanValue(propNode));
iterator.remove();
}
}
- if (builder instanceof GeoPointFieldMapperLegacy.Builder) {
- return GeoPointFieldMapperLegacy.parse((GeoPointFieldMapperLegacy.Builder) builder, node, parserContext);
+ if (builder instanceof LegacyGeoPointFieldMapper.Builder) {
+ return LegacyGeoPointFieldMapper.parse((LegacyGeoPointFieldMapper.Builder) builder, node, parserContext);
+ } else if (builder instanceof LatLonPointFieldMapper.Builder) {
+ return (LatLonPointFieldMapper.Builder) builder;
}
return (GeoPointFieldMapper.Builder) builder;
}
}
- public static class GeoPointFieldType extends MappedFieldType {
+ public abstract static class GeoPointFieldType extends MappedFieldType {
+ GeoPointFieldType() {
+ }
+
+ GeoPointFieldType(GeoPointFieldType ref) {
+ super(ref);
+ }
+
+ @Override
+ public String typeName() {
+ return CONTENT_TYPE;
+ }
+ }
+
+ public static class LegacyGeoPointFieldType extends GeoPointFieldType {
protected MappedFieldType geoHashFieldType;
protected int geoHashPrecision;
protected boolean geoHashPrefixEnabled;
@@ -250,9 +288,9 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
protected MappedFieldType latFieldType;
protected MappedFieldType lonFieldType;
- GeoPointFieldType() {}
+ LegacyGeoPointFieldType() {}
- GeoPointFieldType(GeoPointFieldType ref) {
+ LegacyGeoPointFieldType(LegacyGeoPointFieldType ref) {
super(ref);
this.geoHashFieldType = ref.geoHashFieldType; // copying ref is ok, this can never be modified
this.geoHashPrecision = ref.geoHashPrecision;
@@ -263,13 +301,13 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public MappedFieldType clone() {
- return new GeoPointFieldType(this);
+ return new LegacyGeoPointFieldType(this);
}
@Override
public boolean equals(Object o) {
if (!super.equals(o)) return false;
- GeoPointFieldType that = (GeoPointFieldType) o;
+ LegacyGeoPointFieldType that = (LegacyGeoPointFieldType) o;
return geoHashPrecision == that.geoHashPrecision &&
geoHashPrefixEnabled == that.geoHashPrefixEnabled &&
java.util.Objects.equals(geoHashFieldType, that.geoHashFieldType) &&
@@ -284,14 +322,9 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
@Override
- public String typeName() {
- return CONTENT_TYPE;
- }
-
- @Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
- GeoPointFieldType other = (GeoPointFieldType)fieldType;
+ LegacyGeoPointFieldType other = (LegacyGeoPointFieldType)fieldType;
if (isLatLonEnabled() != other.isLatLonEnabled()) {
conflicts.add("mapper [" + name() + "] has different [lat_lon]");
}
@@ -392,9 +425,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
this.ignoreMalformed = ignoreMalformed;
}
- @Override
- public GeoPointFieldType fieldType() {
- return (GeoPointFieldType) super.fieldType();
+
+ public LegacyGeoPointFieldType legacyFieldType() {
+ return (LegacyGeoPointFieldType) super.fieldType();
}
@Override
@@ -408,15 +442,22 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public Iterator<Mapper> iterator() {
+ if (this instanceof LatLonPointFieldMapper == false) {
+ return Iterators.concat(super.iterator(), legacyIterator());
+ }
+ return super.iterator();
+ }
+
+ public Iterator<Mapper> legacyIterator() {
List<Mapper> extras = new ArrayList<>();
- if (fieldType().isGeoHashEnabled()) {
+ if (legacyFieldType().isGeoHashEnabled()) {
extras.add(geoHashMapper);
}
- if (fieldType().isLatLonEnabled()) {
+ if (legacyFieldType().isLatLonEnabled()) {
extras.add(latMapper);
extras.add(lonMapper);
}
- return Iterators.concat(super.iterator(), extras.iterator());
+ return extras.iterator();
}
@Override
@@ -430,13 +471,13 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
protected void parse(ParseContext context, GeoPoint point, String geoHash) throws IOException {
- if (fieldType().isGeoHashEnabled()) {
+ if (legacyFieldType().isGeoHashEnabled()) {
if (geoHash == null) {
geoHash = GeoHashUtils.stringEncode(point.lon(), point.lat());
}
addGeoHashField(context, geoHash);
}
- if (fieldType().isLatLonEnabled()) {
+ if (legacyFieldType().isLatLonEnabled()) {
latMapper.parse(context.createExternalValueContext(point.lat()));
lonMapper.parse(context.createExternalValueContext(point.lon()));
}
@@ -511,8 +552,9 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
private void addGeoHashField(ParseContext context, String geoHash) throws IOException {
- int len = Math.min(fieldType().geoHashPrecision(), geoHash.length());
- int min = fieldType().isGeoHashPrefixEnabled() ? 1 : len;
+ LegacyGeoPointFieldType ft = (LegacyGeoPointFieldType)fieldType;
+ int len = Math.min(ft.geoHashPrecision(), geoHash.length());
+ int min = ft.isGeoHashPrefixEnabled() ? 1 : len;
for (int i = len; i >= min; i--) {
// side effect of this call is adding the field
@@ -531,23 +573,30 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
- if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
- builder.field("lat_lon", fieldType().isLatLonEnabled());
+ if (this instanceof LatLonPointFieldMapper == false) {
+ legacyDoXContentBody(builder, includeDefaults, params);
}
- if (fieldType().isLatLonEnabled() && (includeDefaults || fieldType().latFieldType().numericPrecisionStep() != LegacyNumericUtils.PRECISION_STEP_DEFAULT)) {
- builder.field("precision_step", fieldType().latFieldType().numericPrecisionStep());
+ if (includeDefaults || ignoreMalformed.explicit()) {
+ builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value());
}
- if (includeDefaults || fieldType().isGeoHashEnabled() != Defaults.ENABLE_GEOHASH) {
- builder.field("geohash", fieldType().isGeoHashEnabled());
+ }
+
+ protected void legacyDoXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ LegacyGeoPointFieldType ft = (LegacyGeoPointFieldType) fieldType;
+ if (includeDefaults || ft.isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
+ builder.field("lat_lon", ft.isLatLonEnabled());
}
- if (includeDefaults || fieldType().isGeoHashPrefixEnabled() != Defaults.ENABLE_GEOHASH_PREFIX) {
- builder.field("geohash_prefix", fieldType().isGeoHashPrefixEnabled());
+ if (ft.isLatLonEnabled() && (includeDefaults || ft.latFieldType().numericPrecisionStep() != LegacyNumericUtils.PRECISION_STEP_DEFAULT)) {
+ builder.field("precision_step", ft.latFieldType().numericPrecisionStep());
}
- if (fieldType().isGeoHashEnabled() && (includeDefaults || fieldType().geoHashPrecision() != Defaults.GEO_HASH_PRECISION)) {
- builder.field("geohash_precision", fieldType().geoHashPrecision());
+ if (includeDefaults || ft.isGeoHashEnabled() != Defaults.ENABLE_GEOHASH) {
+ builder.field("geohash", ft.isGeoHashEnabled());
}
- if (includeDefaults || ignoreMalformed.explicit()) {
- builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value());
+ if (includeDefaults || ft.isGeoHashPrefixEnabled() != Defaults.ENABLE_GEOHASH_PREFIX) {
+ builder.field("geohash_prefix", ft.isGeoHashPrefixEnabled());
+ }
+ if (ft.isGeoHashEnabled() && (includeDefaults || ft.geoHashPrecision() != Defaults.GEO_HASH_PRECISION)) {
+ builder.field("geohash_precision", ft.geoHashPrecision());
}
}
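The parse() hunk above now picks one of three builders from the index-creation version. A compact sketch of the gate; the Version enum below is a hypothetical stand-in for org.elasticsearch.Version, and V_5_0_0 stands in for LatLonPointFieldMapper.LAT_LON_FIELD_VERSION, whose actual value this diff does not show:

    public class BuilderGateDemo {
        enum Version {
            V_2_1_0, V_2_2_0, V_5_0_0;
            boolean before(Version other) { return compareTo(other) < 0; }
            boolean onOrAfter(Version other) { return compareTo(other) >= 0; }
        }

        static String chooseBuilder(Version indexVersionCreated) {
            if (indexVersionCreated.before(Version.V_2_2_0)) {
                return "LegacyGeoPointFieldMapper.Builder";   // pre-2.2 indexes
            } else if (indexVersionCreated.onOrAfter(Version.V_5_0_0)) {
                return "LatLonPointFieldMapper.Builder";      // LatLonPoint cutover
            }
            return "GeoPointFieldMapper.Builder";             // 2.2 .. pre-5.0
        }

        public static void main(String[] args) {
            System.out.println(chooseBuilder(Version.V_2_1_0)); // LegacyGeoPointFieldMapper.Builder
            System.out.println(chooseBuilder(Version.V_5_0_0)); // LatLonPointFieldMapper.Builder
        }
    }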
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
index 13bb7d255a..09035bfa3c 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
@@ -153,7 +153,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
if (searchAnalyzer != null) {
throw new MapperParsingException("analyzer on completion field [" + name + "] must be set when search_analyzer is set");
}
- indexAnalyzer = searchAnalyzer = parserContext.analysisService().analyzer("simple");
+ indexAnalyzer = searchAnalyzer = parserContext.getIndexAnalyzers().get("simple");
} else if (searchAnalyzer == null) {
searchAnalyzer = indexAnalyzer;
}
@@ -164,7 +164,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
}
private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) {
- NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name);
+ NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(name);
if (analyzer == null) {
throw new IllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java
index 655af43710..590ca0f861 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java
@@ -206,7 +206,7 @@ public class CompletionFieldMapper2x extends FieldMapper {
throw new MapperParsingException(
"analyzer on completion field [" + name + "] must be set when search_analyzer is set");
}
- indexAnalyzer = searchAnalyzer = parserContext.analysisService().analyzer("simple");
+ indexAnalyzer = searchAnalyzer = parserContext.getIndexAnalyzers().get("simple");
} else if (searchAnalyzer == null) {
searchAnalyzer = indexAnalyzer;
}
@@ -217,7 +217,7 @@ public class CompletionFieldMapper2x extends FieldMapper {
}
private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) {
- NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name);
+ NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(name);
if (analyzer == null) {
throw new IllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java
index f3d1fbd53f..717f036155 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java
@@ -37,7 +37,6 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.util.LocaleUtils;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.fielddata.IndexFieldData;
@@ -60,7 +59,7 @@ import java.util.concurrent.Callable;
import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter;
/** A {@link FieldMapper} for dates. */
-public class DateFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public class DateFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "date";
public static final FormatDateTimeFormatter DEFAULT_DATE_TIME_FORMATTER = Joda.forPattern(
@@ -119,9 +118,8 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
@Override
public DateFieldMapper build(BuilderContext context) {
setupFieldType(context);
- DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
- context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (DateFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
+ includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
}
@@ -473,11 +471,13 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
MappedFieldType fieldType,
MappedFieldType defaultFieldType,
Explicit<Boolean> ignoreMalformed,
+ Boolean includeInAll,
Settings indexSettings,
MultiFields multiFields,
CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.ignoreMalformed = ignoreMalformed;
+ this.includeInAll = includeInAll;
}
@Override
@@ -496,39 +496,6 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
}
@Override
- public Mapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- DateFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- DateFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper unsetIncludeInAll() {
- if (includeInAll != null) {
- DateFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
String dateAsString;
if (context.externalValueSet()) {
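DateFieldMapper (and IpFieldMapper further down) no longer implement AllFieldMapper.IncludeInAll: instead of building a mapper and then cloning it through includeInAll(...), the builder passes the value directly to the constructor, where it becomes final state. A sketch of the before/after construction styles with hypothetical minimal types:

    // Before: construct, then clone-and-mutate via includeInAll(...).
    // After: the value is injected through the constructor in a single construction step.
    public class IncludeInAllInjectionDemo {
        static class Mapper {
            final Boolean includeInAll; // nullable: null means "inherit the default"
            Mapper(Boolean includeInAll) { this.includeInAll = includeInAll; }
        }

        static class Builder {
            Boolean includeInAll;
            Builder includeInAll(Boolean v) { this.includeInAll = v; return this; }
            Mapper build() { return new Mapper(includeInAll); } // no clone step
        }

        public static void main(String[] args) {
            Mapper m = new Builder().includeInAll(Boolean.TRUE).build();
            System.out.println(m.includeInAll); // true
        }
    }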
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
index a4d1a0c5e4..eb6d6a9a3e 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
@@ -32,7 +32,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser;
import org.elasticsearch.search.internal.SearchContext;
@@ -147,11 +147,11 @@ public class DocumentMapper implements ToXContent {
}
MapperUtils.collect(this.mapping.root, newObjectMappers, newFieldMappers);
- final AnalysisService analysisService = mapperService.analysisService();
+ final IndexAnalyzers indexAnalyzers = mapperService.getIndexAnalyzers();
this.fieldMappers = new DocumentFieldMappers(newFieldMappers,
- analysisService.defaultIndexAnalyzer(),
- analysisService.defaultSearchAnalyzer(),
- analysisService.defaultSearchQuoteAnalyzer());
+ indexAnalyzers.getDefaultIndexAnalyzer(),
+ indexAnalyzers.getDefaultSearchAnalyzer(),
+ indexAnalyzers.getDefaultSearchQuoteAnalyzer());
Map<String, ObjectMapper> builder = new HashMap<>();
for (ObjectMapper objectMapper : newObjectMappers) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
index f336fbb01a..2cdeed9f04 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
@@ -22,14 +22,13 @@ package org.elasticsearch.index.mapper;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
@@ -44,7 +43,7 @@ import static java.util.Collections.unmodifiableMap;
public class DocumentMapperParser {
final MapperService mapperService;
- final AnalysisService analysisService;
+ final IndexAnalyzers indexAnalyzers;
private final SimilarityService similarityService;
private final Supplier<QueryShardContext> queryShardContextSupplier;
@@ -56,12 +55,12 @@ public class DocumentMapperParser {
private final Map<String, Mapper.TypeParser> typeParsers;
private final Map<String, MetadataFieldMapper.TypeParser> rootTypeParsers;
- public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, AnalysisService analysisService,
+ public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, IndexAnalyzers indexAnalyzers,
SimilarityService similarityService, MapperRegistry mapperRegistry,
Supplier<QueryShardContext> queryShardContextSupplier) {
this.parseFieldMatcher = new ParseFieldMatcher(indexSettings.getSettings());
this.mapperService = mapperService;
- this.analysisService = analysisService;
+ this.indexAnalyzers = indexAnalyzers;
this.similarityService = similarityService;
this.queryShardContextSupplier = queryShardContextSupplier;
this.typeParsers = mapperRegistry.getMapperParsers();
@@ -70,7 +69,7 @@ public class DocumentMapperParser {
}
public Mapper.TypeParser.ParserContext parserContext(String type) {
- return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get());
+ return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get());
}
public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index 80d59ec39f..60ca0c22cc 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -343,6 +343,12 @@ final class DocumentParser {
context = nestedContext(context, mapper);
}
+ // update the default value of include_in_all if necessary
+ Boolean includeInAll = mapper.includeInAll();
+ if (includeInAll != null) {
+ context = context.setIncludeInAllDefault(includeInAll);
+ }
+
// if we are at the end of the previous object, advance
if (token == XContentParser.Token.END_OBJECT) {
token = parser.nextToken();
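The DocumentParser hunk above is the other half of the IncludeInAll removal: an object mapper's include_in_all setting is pushed into the ParseContext as the default for everything parsed beneath it, instead of each field mapper carrying a cloned copy. A sketch with a hypothetical immutable context:

    public class IncludeInAllDefaultDemo {
        static final class Context {
            final Boolean includeInAllDefault;
            Context(Boolean d) { this.includeInAllDefault = d; }
            Context setIncludeInAllDefault(Boolean d) { return new Context(d); }
        }

        static Context enterObject(Context context, Boolean mapperIncludeInAll) {
            if (mapperIncludeInAll != null) {            // mirrors DocumentParser's new block
                context = context.setIncludeInAllDefault(mapperIncludeInAll);
            }
            return context;
        }

        public static void main(String[] args) {
            Context root = new Context(null);
            System.out.println(enterObject(root, Boolean.FALSE).includeInAllDefault); // false
            System.out.println(enterObject(root, null).includeInAllDefault);          // null
        }
    }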
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
index e3f88b2231..9128002eb5 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
@@ -246,6 +246,11 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
super(simpleName);
assert indexSettings != null;
this.indexCreatedVersion = Version.indexCreated(indexSettings);
+ if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_beta1)) {
+ if (simpleName.isEmpty()) {
+ throw new IllegalArgumentException("name cannot be empty string");
+ }
+ }
fieldType.freeze();
this.fieldType = fieldType;
defaultFieldType.freeze();
@@ -537,11 +542,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
ImmutableOpenMap.Builder<String, FieldMapper> builder = new ImmutableOpenMap.Builder<>();
// we disable the all in multi-field mappers
for (ObjectObjectCursor<String, FieldMapper> cursor : mappers) {
- FieldMapper mapper = cursor.value;
- if (mapper instanceof AllFieldMapper.IncludeInAll) {
- mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
- }
- builder.put(cursor.key, mapper);
+ builder.put(cursor.key, cursor.value);
}
this.mappers = builder.build();
}
@@ -568,10 +569,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
FieldMapper mergeWithMapper = cursor.value;
FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
if (mergeIntoMapper == null) {
- // we disable the all in multi-field mappers
- if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
- mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
- }
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
} else {
FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false);
@@ -668,14 +665,4 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
}
}
- /**
- * Fields might not be available before indexing, for example _all, token_count,...
- * When get is called and these fields are requested, this case needs special treatment.
- *
- * @return If the field is available before indexing or not.
- */
- public boolean isGenerated() {
- return false;
- }
-
}
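The new guard in the FieldMapper constructor rejects empty field names, but only for indexes created on or after 5.0.0-beta1, so existing indexes keep opening. A tiny sketch of the version-gated validation (the boolean parameter stands in for the Version.onOrAfter check):

    public class NameGuardDemo {
        static void checkName(String simpleName, boolean createdOnOrAfter5beta1) {
            if (createdOnOrAfter5beta1 && simpleName.isEmpty()) {
                throw new IllegalArgumentException("name cannot be empty string");
            }
        }

        public static void main(String[] args) {
            checkName("", false);                     // tolerated: pre-5.0 index
            try {
                checkName("", true);                  // rejected: new index
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());   // name cannot be empty string
            }
        }
    }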
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java
index 1b18652bce..7343963f09 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java
@@ -289,8 +289,4 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
return builder;
}
- @Override
- public boolean isGenerated() {
- return true;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
index c27ddc1811..655bf4aad0 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
@@ -48,7 +48,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
public static class Defaults extends BaseGeoPointFieldMapper.Defaults {
- public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType();
+ public static final GeoPointFieldType FIELD_TYPE = new LegacyGeoPointFieldType();
static {
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
@@ -127,4 +127,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
}
super.parse(context, point, geoHash);
}
+
+ @Override
+ public LegacyGeoPointFieldType fieldType() {
+ return (LegacyGeoPointFieldType) super.fieldType();
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
index c5e8365760..69a8e06f85 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
@@ -50,7 +50,7 @@ import java.util.List;
import java.util.Map;
/** A {@link FieldMapper} for ip addresses. */
-public class IpFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public class IpFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "ip";
@@ -81,9 +81,8 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include
@Override
public IpFieldMapper build(BuilderContext context) {
setupFieldType(context);
- IpFieldMapper fieldMapper = new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
- context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (IpFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
+ includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
}
@@ -261,11 +260,13 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include
MappedFieldType fieldType,
MappedFieldType defaultFieldType,
Explicit<Boolean> ignoreMalformed,
+ Boolean includeInAll,
Settings indexSettings,
MultiFields multiFields,
CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.ignoreMalformed = ignoreMalformed;
+ this.includeInAll = includeInAll;
}
@Override
@@ -284,39 +285,6 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include
}
@Override
- public Mapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- IpFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- IpFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper unsetIncludeInAll() {
- if (includeInAll != null) {
- IpFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
Object addressAsObject;
if (context.externalValueSet()) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
index 1874c86ac7..204e61aabe 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
@@ -24,6 +24,7 @@ import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -32,19 +33,29 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.index.mapper.TypeParsers.parseField;
/**
* A field mapper for keywords. This mapper accepts strings and indexes them as-is.
*/
-public final class KeywordFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public final class KeywordFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "keyword";
+ private static final List<String> SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING = unmodifiableList(Arrays.asList(
+ "type",
+ // common keyword parameters, for which the upgrade is straightforward
+ "index", "store", "doc_values", "omit_norms", "norms", "boost", "fields", "copy_to",
+ "include_in_all", "ignore_above", "index_options", "similarity"));
+
public static class Defaults {
public static final MappedFieldType FIELD_TYPE = new KeywordFieldType();
@@ -94,16 +105,38 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
@Override
public KeywordFieldMapper build(BuilderContext context) {
setupFieldType(context);
- KeywordFieldMapper fieldMapper = new KeywordFieldMapper(
- name, fieldType, defaultFieldType, ignoreAbove,
+ return new KeywordFieldMapper(
+ name, fieldType, defaultFieldType, ignoreAbove, includeInAll,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return fieldMapper.includeInAll(includeInAll);
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha1)) {
+ // Downgrade "keyword" to "string" in indexes created in 2.x so you can use modern syntax against old indexes
+ Set<String> unsupportedParameters = new HashSet<>(node.keySet());
+ unsupportedParameters.removeAll(SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING);
+ if (false == SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING.containsAll(node.keySet())) {
+ throw new IllegalArgumentException("Automatic downgrade from [keyword] to [string] failed because parameters "
+ + unsupportedParameters + " are not supported for automatic downgrades.");
+ }
+ { // Downgrade "index"
+ Object index = node.get("index");
+ if (index == null || Boolean.TRUE.equals(index)) {
+ index = "not_analyzed";
+ } else if (Boolean.FALSE.equals(index)) {
+ index = "no";
+ } else {
+ throw new IllegalArgumentException(
+ "Can't parse [index] value [" + index + "] for field [" + name + "], expected [true] or [false]");
+ }
+ node.put("index", index);
+ }
+
+ return new StringFieldMapper.TypeParser().parse(name, node, parserContext);
+ }
KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder(name);
parseField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
@@ -177,10 +210,11 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
private int ignoreAbove;
protected KeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- int ignoreAbove, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ int ignoreAbove, Boolean includeInAll, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0;
this.ignoreAbove = ignoreAbove;
+ this.includeInAll = includeInAll;
}
/** Values that have more chars than the return value of this method will
@@ -201,39 +235,6 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
}
@Override
- public KeywordFieldMapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- KeywordFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public KeywordFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- KeywordFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public KeywordFieldMapper unsetIncludeInAll() {
- if (includeInAll != null) {
- KeywordFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
final String value;
if (context.externalValueSet()) {
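
The TypeParser change above auto-downgrades a keyword mapping to the 2.x string mapper when the index predates 5.0: parameters with no clean 2.x equivalent are rejected, and the boolean index flag is translated into the old not_analyzed/no spelling before delegating to StringFieldMapper.TypeParser. A standalone sketch of that check on a plain Map, without the Elasticsearch parser plumbing:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class KeywordDowngradeSketch {
        private static final List<String> SUPPORTED = Arrays.asList(
            "type", "index", "store", "doc_values", "omit_norms", "norms", "boost",
            "fields", "copy_to", "include_in_all", "ignore_above", "index_options", "similarity");

        static Map<String, Object> downgrade(String name, Map<String, Object> node) {
            Set<String> unsupported = new HashSet<>(node.keySet());
            unsupported.removeAll(SUPPORTED);
            if (unsupported.isEmpty() == false) {
                throw new IllegalArgumentException("Automatic downgrade from [keyword] to [string] failed "
                    + "because parameters " + unsupported + " are not supported for automatic downgrades.");
            }
            Object index = node.get("index");
            if (index == null || Boolean.TRUE.equals(index)) {
                index = "not_analyzed"; // 2.x spelling of "indexed but not analyzed"
            } else if (Boolean.FALSE.equals(index)) {
                index = "no";
            } else {
                throw new IllegalArgumentException("Can't parse [index] value [" + index
                    + "] for field [" + name + "], expected [true] or [false]");
            }
            node.put("index", index);
            return node; // would now be handed to StringFieldMapper.TypeParser
        }

        public static void main(String[] args) {
            Map<String, Object> node = new HashMap<>();
            node.put("type", "keyword");
            node.put("index", true);
            System.out.println(downgrade("tag", node)); // index becomes "not_analyzed"
        }
    }
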
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java
new file mode 100644
index 0000000000..647dd315a2
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.document.LatLonDocValuesField;
+import org.apache.lucene.document.LatLonPoint;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.plain.AbstractLatLonPointDVIndexFieldData;
+import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.index.query.QueryShardException;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Field Mapper for geo_point types.
+ *
+ * Uses lucene 6 LatLonPoint encoding
+ */
+public class LatLonPointFieldMapper extends BaseGeoPointFieldMapper {
+ public static final String CONTENT_TYPE = "geo_point";
+ public static final Version LAT_LON_FIELD_VERSION = Version.V_5_0_0_beta1;
+
+ public static class Defaults extends BaseGeoPointFieldMapper.Defaults {
+ public static final LatLonPointFieldType FIELD_TYPE = new LatLonPointFieldType();
+
+ static {
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setHasDocValues(true);
+ FIELD_TYPE.setDimensions(2, Integer.BYTES);
+ FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends BaseGeoPointFieldMapper.Builder<Builder, LatLonPointFieldMapper> {
+ public Builder(String name) {
+ super(name, Defaults.FIELD_TYPE);
+ }
+
+ @Override
+ public LatLonPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
+ MappedFieldType defaultFieldType, Settings indexSettings,
+ FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
+ MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
+ CopyTo copyTo) {
+ setupFieldType(context);
+ return new LatLonPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, multiFields,
+ ignoreMalformed, copyTo);
+ }
+
+ @Override
+ public LatLonPointFieldMapper build(BuilderContext context) {
+ return super.build(context);
+ }
+ }
+
+ public static class TypeParser extends BaseGeoPointFieldMapper.TypeParser {
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext)
+ throws MapperParsingException {
+ return super.parse(name, node, parserContext);
+ }
+ }
+
+ public LatLonPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
+ Settings indexSettings, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
+ CopyTo copyTo) {
+ super(simpleName, fieldType, defaultFieldType, indexSettings, null, null, null, multiFields, ignoreMalformed, copyTo);
+ }
+
+ public static class LatLonPointFieldType extends GeoPointFieldType {
+ LatLonPointFieldType() {
+ }
+
+ LatLonPointFieldType(LatLonPointFieldType ref) {
+ super(ref);
+ }
+
+ @Override
+ public String typeName() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public MappedFieldType clone() {
+ return new LatLonPointFieldType(this);
+ }
+
+ @Override
+ public IndexFieldData.Builder fielddataBuilder() {
+ failIfNoDocValues();
+ return new AbstractLatLonPointDVIndexFieldData.Builder();
+ }
+
+ @Override
+ public Query termQuery(Object value, QueryShardContext context) {
+ throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead: ["
+ + name() + "]");
+ }
+ }
+
+ @Override
+ protected void parse(ParseContext originalContext, GeoPoint point, String geoHash) throws IOException {
+ // Geopoint fields, by default, will not be included in _all
+ final ParseContext context = originalContext.setIncludeInAllDefault(false);
+
+ if (ignoreMalformed.value() == false) {
+ if (point.lat() > 90.0 || point.lat() < -90.0) {
+ throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name());
+ }
+ if (point.lon() > 180.0 || point.lon() < -180.0) {
+ throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name());
+ }
+ } else {
+ GeoUtils.normalizePoint(point);
+ }
+ if (fieldType().indexOptions() != IndexOptions.NONE) {
+ context.doc().add(new LatLonPoint(fieldType().name(), point.lat(), point.lon()));
+ }
+ if (fieldType().stored()) {
+ context.doc().add(new StoredField(fieldType().name(), point.toString()));
+ }
+ if (fieldType().hasDocValues()) {
+ context.doc().add(new LatLonDocValuesField(fieldType().name(), point.lat(), point.lon()));
+ }
+ // if the mapping contains multifields then use the geohash string
+ if (multiFields.iterator().hasNext()) {
+ multiFields.parse(this, context.createExternalValueContext(point.geohash()));
+ }
+ }
+}
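
The parse(...) method in this new mapper fans a single geo point out into up to three Lucene 6 fields: a BKD-indexed LatLonPoint for geo queries, a LatLonDocValuesField for sorting and aggregations, and a StoredField for retrieval. A minimal sketch using those Lucene classes directly; the field name "location" is illustrative:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LatLonDocValuesField;
    import org.apache.lucene.document.LatLonPoint;
    import org.apache.lucene.document.StoredField;

    public class LatLonPointSketch {
        public static void main(String[] args) {
            double lat = 48.8566, lon = 2.3522;
            Document doc = new Document();
            doc.add(new LatLonPoint("location", lat, lon));          // BKD-indexed, answers geo queries
            doc.add(new LatLonDocValuesField("location", lat, lon)); // doc values, answers sort/agg
            doc.add(new StoredField("location", lat + "," + lon));   // stored, returned with hits
            System.out.println(doc.getFields().size()); // 3
        }
    }
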
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java
index a7f8f85f8f..2c63806ebb 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java
@@ -75,9 +75,8 @@ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyByteFieldMapper fieldMapper = new LegacyByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
- coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyByteFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new LegacyByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
+ coerce(context), includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
@Override
@@ -176,9 +175,9 @@ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyByteFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java
index 58fbe2895d..29689d06df 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java
@@ -36,7 +36,6 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.util.LocaleUtils;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -116,9 +115,8 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper {
}
setupFieldType(context);
fieldType.setNullValue(nullValue);
- LegacyDateFieldMapper fieldMapper = new LegacyDateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
- coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyDateFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new LegacyDateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
+ coerce(context), includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
@Override
@@ -467,8 +465,8 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyDateFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit<Boolean> ignoreMalformed,Explicit<Boolean> coerce,
- Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ Boolean includeInAll, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java
index 0e72716119..07e459e8ea 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java
@@ -78,9 +78,8 @@ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyDoubleFieldMapper fieldMapper = new LegacyDoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
- context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyDoubleFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new LegacyDoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
+ includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
@Override
@@ -187,8 +186,8 @@ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyDoubleFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit<Boolean> ignoreMalformed,
- Explicit<Boolean> coerce, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ Explicit<Boolean> coerce, Boolean includeInAll, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java
index 831ac973de..3fbc639ea6 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java
@@ -77,9 +77,8 @@ public class LegacyFloatFieldMapper extends LegacyNumberFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyFloatFieldMapper fieldMapper = new LegacyFloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
- context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyFloatFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new LegacyFloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
+ includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
@Override
@@ -171,9 +170,9 @@ public class LegacyFloatFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyFloatFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapperLegacy.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java
index 4d09e6f983..99ca07b06b 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapperLegacy.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java
@@ -49,7 +49,7 @@ import java.util.Map;
* "lon" : 2.1
* }
*/
-public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implements ArrayValueMapperParser {
+public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implements ArrayValueMapperParser {
public static final String CONTENT_TYPE = "geo_point";
@@ -58,9 +58,9 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
}
public static class Defaults extends BaseGeoPointFieldMapper.Defaults{
- public static final Explicit<Boolean> COERCE = new Explicit(false, false);
+ public static final Explicit<Boolean> COERCE = new Explicit<>(false, false);
- public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType();
+ public static final GeoPointFieldType FIELD_TYPE = new LegacyGeoPointFieldType();
static {
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
@@ -73,7 +73,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
/**
* Concrete builder for legacy GeoPointField
*/
- public static class Builder extends BaseGeoPointFieldMapper.Builder<Builder, GeoPointFieldMapperLegacy> {
+ public static class Builder extends BaseGeoPointFieldMapper.Builder<Builder, LegacyGeoPointFieldMapper> {
private Boolean coerce;
@@ -98,25 +98,26 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
}
@Override
- public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType,
- MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper,
- FieldMapper lonMapper, FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
- CopyTo copyTo) {
+ public LegacyGeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
+ MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper,
+ FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
+ CopyTo copyTo) {
fieldType.setTokenized(false);
setupFieldType(context);
fieldType.setHasDocValues(false);
defaultFieldType.setHasDocValues(false);
- return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
- geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo);
+ return new LegacyGeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper,
+ lonMapper, geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo);
}
@Override
- public GeoPointFieldMapperLegacy build(BuilderContext context) {
+ public LegacyGeoPointFieldMapper build(BuilderContext context) {
return super.build(context);
}
}
- public static Builder parse(Builder builder, Map<String, Object> node, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException {
+ public static Builder parse(Builder builder, Map<String, Object> node, Mapper.TypeParser.ParserContext parserContext)
+ throws MapperParsingException {
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
String propName = entry.getKey();
@@ -154,7 +155,8 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
return instance;
}
- /** Get an instance based on the expected precision. Here are examples of the number of required bytes per value depending on the
+ /** Get an instance based on the expected precision. Here are examples of the number of required bytes per value
+ * depending on the
* expected precision:<ul>
* <li>1km: 4 bytes</li>
* <li>3m: 6 bytes</li>
@@ -181,13 +183,15 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
this.numBytes = numBytes;
this.numBytesPerCoordinate = numBytes / 2;
this.factor = Math.pow(2, - numBytesPerCoordinate * 8 + 9);
- assert (1L << (numBytesPerCoordinate * 8 - 1)) * factor > 180 && (1L << (numBytesPerCoordinate * 8 - 2)) * factor < 180 : numBytesPerCoordinate + " " + factor;
+ assert (1L << (numBytesPerCoordinate * 8 - 1)) * factor > 180 && (1L << (numBytesPerCoordinate * 8 - 2))
+ * factor < 180 : numBytesPerCoordinate + " " + factor;
if (numBytes == MAX_NUM_BYTES) {
// no precision loss compared to a double
precision = new DistanceUnit.Distance(0, DistanceUnit.DEFAULT);
} else {
+ // factor/2 because we use Math.round instead of a cast to convert the double to a long
precision = new DistanceUnit.Distance(
- GeoDistance.PLANE.calculate(0, 0, factor / 2, factor / 2, DistanceUnit.DEFAULT), // factor/2 because we use Math.round instead of a cast to convert the double to a long
+ GeoDistance.PLANE.calculate(0, 0, factor / 2, factor / 2, DistanceUnit.DEFAULT),
DistanceUnit.DEFAULT);
}
}
@@ -256,10 +260,9 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
protected Explicit<Boolean> coerce;
- public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
- FieldMapper latMapper, FieldMapper lonMapper,
- FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
- Explicit<Boolean> coerce, CopyTo copyTo) {
+ public LegacyGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
+ Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
+ MultiFields multiFields, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
ignoreMalformed, copyTo);
this.coerce = coerce;
@@ -269,7 +272,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
- GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith;
+ LegacyGeoPointFieldMapper gpfmMergeWith = (LegacyGeoPointFieldMapper) mergeWith;
if (gpfmMergeWith.coerce.explicit()) {
if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) {
throw new IllegalArgumentException("mapper [" + fieldType().name() + "] has different [coerce]");
@@ -301,14 +304,16 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
}
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
- Field field = new Field(fieldType().name(), Double.toString(point.lat()) + ',' + Double.toString(point.lon()), fieldType());
+ Field field = new Field(fieldType().name(), Double.toString(point.lat()) + ','
+ + Double.toString(point.lon()), fieldType());
context.doc().add(field);
}
super.parse(context, point, geoHash);
if (fieldType().hasDocValues()) {
- CustomGeoPointDocValuesField field = (CustomGeoPointDocValuesField) context.doc().getByKey(fieldType().name());
+ CustomGeoPointDocValuesField field = (CustomGeoPointDocValuesField) context.doc()
+ .getByKey(fieldType().name());
if (field == null) {
field = new CustomGeoPointDocValuesField(fieldType().name(), point.lat(), point.lon());
context.doc().addWithKey(fieldType().name(), field);
@@ -326,6 +331,11 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
}
}
+ @Override
+ public LegacyGeoPointFieldType fieldType() {
+ return (LegacyGeoPointFieldType) super.fieldType();
+ }
+
public static class CustomGeoPointDocValuesField extends CustomDocValuesField {
private final ObjectHashSet<GeoPoint> points;
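
The reformatted assertion in the constructor above is a sanity check on the precision encoding: with factor = 2^(-numBytesPerCoordinate * 8 + 9), the signed per-coordinate integer range must cover ±180 degrees without wasting the top bit. A quick arithmetic check for the default 4-byte (2 bytes per coordinate) case:

    public class GeoEncodingSketch {
        public static void main(String[] args) {
            int numBytes = 4;                          // the "1km: 4 bytes" case from the javadoc above
            int numBytesPerCoordinate = numBytes / 2;
            double factor = Math.pow(2, -numBytesPerCoordinate * 8 + 9);
            System.out.println(factor);                // 0.0078125 degrees per step, roughly 870 m of latitude
            System.out.println((1L << (numBytesPerCoordinate * 8 - 1)) * factor); // 256.0, > 180: the range fits
            System.out.println((1L << (numBytesPerCoordinate * 8 - 2)) * factor); // 128.0, < 180: no bit is wasted
        }
    }
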
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java
index 6a840813ab..65b9b65eaf 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java
@@ -81,10 +81,9 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyIntegerFieldMapper fieldMapper = new LegacyIntegerFieldMapper(name, fieldType, defaultFieldType,
- ignoreMalformed(context), coerce(context),
+ return new LegacyIntegerFieldMapper(name, fieldType, defaultFieldType,
+ ignoreMalformed(context), coerce(context), includeInAll,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyIntegerFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
protected int maxPrecisionStep() {
@@ -175,9 +174,9 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyIntegerFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java
index 4c76e3ec82..699124a4c0 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java
@@ -117,9 +117,8 @@ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyIpFieldMapper fieldMapper = new LegacyIpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
- coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyIpFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new LegacyIpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
+ coerce(context), includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
@Override
@@ -267,9 +266,9 @@ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyIpFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
private static long parseValue(Object value) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java
index ffad4deeb5..feb3328227 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpIndexFieldData.java
@@ -18,33 +18,33 @@
*/
package org.elasticsearch.index.mapper;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.ByteBuffer;
-
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
-import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
import org.elasticsearch.search.MultiValueMode;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+
final class LegacyIpIndexFieldData implements IndexFieldData<AtomicFieldData> {
protected final Index index;
protected final String fieldName;
- protected final ESLogger logger;
+ protected final Logger logger;
public LegacyIpIndexFieldData(Index index, String fieldName) {
this.index = index;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java
index b1e156e263..4661d1cd36 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java
@@ -81,9 +81,9 @@ public class LegacyLongFieldMapper extends LegacyNumberFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyLongFieldMapper fieldMapper = new LegacyLongFieldMapper(name, fieldType, defaultFieldType,
- ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyLongFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new LegacyLongFieldMapper(name, fieldType, defaultFieldType,
+ ignoreMalformed(context), coerce(context), includeInAll, context.indexSettings(),
+ multiFieldsBuilder.build(this, context), copyTo);
}
@Override
@@ -175,9 +175,9 @@ public class LegacyLongFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyLongFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java
index b1d3ead846..f377883aa2 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java
@@ -40,7 +40,7 @@ import org.joda.time.DateTimeZone;
/**
*
*/
-public abstract class LegacyNumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public abstract class LegacyNumberFieldMapper extends FieldMapper {
// this is private since it has a different default
private static final Setting<Boolean> COERCE_SETTING =
Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope);
@@ -158,12 +158,13 @@ public abstract class LegacyNumberFieldMapper extends FieldMapper implements All
protected Explicit<Boolean> coerce;
protected LegacyNumberFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Settings indexSettings,
- MultiFields multiFields, CopyTo copyTo) {
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Boolean includeInAll,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
assert fieldType.tokenized() == false;
this.ignoreMalformed = ignoreMalformed;
this.coerce = coerce;
+ this.includeInAll = includeInAll;
}
@Override
@@ -172,39 +173,6 @@ public abstract class LegacyNumberFieldMapper extends FieldMapper implements All
}
@Override
- public Mapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- LegacyNumberFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- LegacyNumberFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper unsetIncludeInAll() {
- if (includeInAll != null) {
- LegacyNumberFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
RuntimeException e = null;
try {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java
index 39e0080178..b42ec620ae 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java
@@ -34,7 +34,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;
@@ -78,10 +77,9 @@ public class LegacyShortFieldMapper extends LegacyNumberFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyShortFieldMapper fieldMapper = new LegacyShortFieldMapper(name, fieldType, defaultFieldType,
- ignoreMalformed(context), coerce(context),
+ return new LegacyShortFieldMapper(name, fieldType, defaultFieldType,
+ ignoreMalformed(context), coerce(context), includeInAll,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyShortFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
@@ -181,9 +179,9 @@ public class LegacyShortFieldMapper extends LegacyNumberFieldMapper {
}
protected LegacyShortFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyTokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyTokenCountFieldMapper.java
index 48244ffb37..7981b40021 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyTokenCountFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyTokenCountFieldMapper.java
@@ -73,10 +73,9 @@ public class LegacyTokenCountFieldMapper extends LegacyIntegerFieldMapper {
throw new IllegalStateException("Cannot use legacy numeric types after 5.0");
}
setupFieldType(context);
- LegacyTokenCountFieldMapper fieldMapper = new LegacyTokenCountFieldMapper(name, fieldType, defaultFieldType,
- ignoreMalformed(context), coerce(context), context.indexSettings(),
+ return new LegacyTokenCountFieldMapper(name, fieldType, defaultFieldType,
+ ignoreMalformed(context), coerce(context), includeInAll, context.indexSettings(),
analyzer, multiFieldsBuilder.build(this, context), copyTo);
- return (LegacyTokenCountFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
@@ -98,7 +97,7 @@ public class LegacyTokenCountFieldMapper extends LegacyIntegerFieldMapper {
builder.nullValue(nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("analyzer")) {
- NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
@@ -117,8 +116,8 @@ public class LegacyTokenCountFieldMapper extends LegacyIntegerFieldMapper {
private NamedAnalyzer analyzer;
protected LegacyTokenCountFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit<Boolean> ignoreMalformed,
- Explicit<Boolean> coerce, Settings indexSettings, NamedAnalyzer analyzer, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, indexSettings, multiFields, copyTo);
+ Explicit<Boolean> coerce, Boolean includeInAll, Settings indexSettings, NamedAnalyzer analyzer, MultiFields multiFields, CopyTo copyTo) {
+ super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, includeInAll, indexSettings, multiFields, copyTo);
this.analyzer = analyzer;
}
@@ -188,9 +187,4 @@ public class LegacyTokenCountFieldMapper extends LegacyIntegerFieldMapper {
builder.field("analyzer", analyzer());
}
- @Override
- public boolean isGenerated() {
- return true;
- }
-
}
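
The analyzer lookup in the hunk above moves from parserContext.analysisService().analyzer(name) to parserContext.getIndexAnalyzers().get(name); the null-check-and-throw shape of the call site is unchanged. A simplified stand-in (a plain map rather than the real IndexAnalyzers) showing that shape:

    import java.util.HashMap;
    import java.util.Map;

    public class AnalyzerLookupSketch {
        // Stand-in for IndexAnalyzers: a plain name -> analyzer registry.
        static final Map<String, Object> INDEX_ANALYZERS = new HashMap<>();
        static { INDEX_ANALYZERS.put("standard", new Object()); }

        static Object resolve(String analyzerName, String fieldName) {
            Object analyzer = INDEX_ANALYZERS.get(analyzerName); // was: analysisService().analyzer(name)
            if (analyzer == null) {
                throw new IllegalArgumentException(
                    "Analyzer [" + analyzerName + "] not found for field [" + fieldName + "]");
            }
            return analyzer;
        }

        public static void main(String[] args) {
            System.out.println(resolve("standard", "token_count_field")); // resolves
        }
    }
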
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
index 9a434cc8a3..8796f8539d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
@@ -133,7 +133,7 @@ public abstract class MappedFieldType extends FieldType {
eagerGlobalOrdinals, similarity == null ? null : similarity.name(), nullValue, nullValueAsString);
}
- // norelease: we need to override freeze() and add safety checks that all settings are actually set
+ // TODO: we need to override freeze() and add safety checks that all settings are actually set
/** Returns the name of this type, as would be specified in mapping properties */
public abstract String typeName();
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
index a6c3cdb901..0692856642 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
@@ -22,15 +22,14 @@ package org.elasticsearch.index.mapper;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.index.analysis.AnalysisService;
-import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityProvider;
import java.util.Map;
+import java.util.Objects;
import java.util.function.Function;
public abstract class Mapper implements ToXContent, Iterable<Mapper> {
@@ -86,7 +85,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
private final String type;
- private final AnalysisService analysisService;
+ private final IndexAnalyzers indexAnalyzers;
private final Function<String, SimilarityProvider> similarityLookupService;
@@ -100,11 +99,11 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
private final QueryShardContext queryShardContext;
- public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService,
+ public ParserContext(String type, IndexAnalyzers indexAnalyzers, Function<String, SimilarityProvider> similarityLookupService,
MapperService mapperService, Function<String, TypeParser> typeParsers,
Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) {
this.type = type;
- this.analysisService = analysisService;
+ this.indexAnalyzers = indexAnalyzers;
this.similarityLookupService = similarityLookupService;
this.mapperService = mapperService;
this.typeParsers = typeParsers;
@@ -117,8 +116,8 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
return type;
}
- public AnalysisService analysisService() {
- return analysisService;
+ public IndexAnalyzers getIndexAnalyzers() {
+ return indexAnalyzers;
}
public SimilarityProvider getSimilarity(String name) {
@@ -160,7 +159,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
static class MultiFieldParserContext extends ParserContext {
MultiFieldParserContext(ParserContext in) {
- super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext());
+ super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext());
}
}
@@ -172,6 +171,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
private final String simpleName;
public Mapper(String simpleName) {
+ Objects.requireNonNull(simpleName);
this.simpleName = simpleName;
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index 43bf505da4..2da082cba7 100755
--- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -21,6 +21,7 @@ package org.elasticsearch.index.mapper;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.elasticsearch.ElasticsearchGenerationException;
@@ -35,7 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;
@@ -99,7 +100,7 @@ public class MapperService extends AbstractIndexComponent {
@Deprecated
public static final String PERCOLATOR_LEGACY_TYPE_NAME = ".percolator";
- private final AnalysisService analysisService;
+ private final IndexAnalyzers indexAnalyzers;
/**
* Will create types automatically if they do not exists in the mapping definition yet
@@ -126,16 +127,16 @@ public class MapperService extends AbstractIndexComponent {
final MapperRegistry mapperRegistry;
- public MapperService(IndexSettings indexSettings, AnalysisService analysisService,
+ public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers,
SimilarityService similarityService, MapperRegistry mapperRegistry,
Supplier<QueryShardContext> queryShardContextSupplier) {
super(indexSettings);
- this.analysisService = analysisService;
+ this.indexAnalyzers = indexAnalyzers;
this.fieldTypes = new FieldTypeLookup();
- this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, mapperRegistry, queryShardContextSupplier);
- this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
- this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
- this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
+ this.documentParser = new DocumentMapperParser(indexSettings, this, indexAnalyzers, similarityService, mapperRegistry, queryShardContextSupplier);
+ this.indexAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultIndexAnalyzer(), p -> p.indexAnalyzer());
+ this.searchAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultSearchAnalyzer(), p -> p.searchAnalyzer());
+ this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
this.mapperRegistry = mapperRegistry;
this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING);
@@ -170,8 +171,8 @@ public class MapperService extends AbstractIndexComponent {
};
}
- public AnalysisService analysisService() {
- return this.analysisService;
+ public IndexAnalyzers getIndexAnalyzers() {
+ return this.indexAnalyzers;
}
public DocumentMapperParser documentMapperParser() {
@@ -216,7 +217,10 @@ public class MapperService extends AbstractIndexComponent {
}
}
} catch (Exception e) {
- logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index(), mappingType, mappingSource);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("[{}] failed to add mapping [{}], source [{}]", index(), mappingType, mappingSource),
+ e);
throw e;
}
}
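
The logging hunk above adopts the log4j2 lazy-message idiom: the Supplier cast selects the overload that defers building the ParameterizedMessage until WARN is known to be enabled, and the exception is passed as a separate argument rather than interpolated into the message. A minimal self-contained example of that overload, assuming log4j2 is on the classpath:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    public class LazyWarnSketch {
        private static final Logger logger = LogManager.getLogger(LazyWarnSketch.class);

        public static void main(String[] args) {
            Exception failure = new IllegalStateException("mapping failed");
            String index = "test-index";   // illustrative values
            String mappingType = "doc";
            // The lambda runs only if WARN is enabled, so message formatting is free otherwise.
            logger.warn((Supplier<?>) () ->
                new ParameterizedMessage("[{}] failed to add mapping [{}]", index, mappingType), failure);
        }
    }
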
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
index 6eddf07b97..886b93fcf0 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
@@ -60,7 +60,7 @@ import java.util.Map;
import java.util.Objects;
/** A {@link FieldMapper} for numeric types: byte, short, int, long, float and double. */
-public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public class NumberFieldMapper extends FieldMapper {
// this is private since it has a different default
static final Setting<Boolean> COERCE_SETTING =
@@ -114,10 +114,8 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
@Override
public NumberFieldMapper build(BuilderContext context) {
setupFieldType(context);
- NumberFieldMapper fieldMapper =
- new NumberFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
- coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (NumberFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new NumberFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
+ coerce(context), includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
}
@@ -871,12 +869,14 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
MappedFieldType defaultFieldType,
Explicit<Boolean> ignoreMalformed,
Explicit<Boolean> coerce,
+ Boolean includeInAll,
Settings indexSettings,
MultiFields multiFields,
CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.ignoreMalformed = ignoreMalformed;
this.coerce = coerce;
+ this.includeInAll = includeInAll;
}
@Override
@@ -895,40 +895,9 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
}
@Override
- public Mapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- NumberFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- NumberFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper unsetIncludeInAll() {
- if (includeInAll != null) {
- NumberFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ final boolean includeInAll = context.includeInAll(this.includeInAll, this);
+
XContentParser parser = context.parser();
Object value;
Number numericValue = null;
@@ -941,18 +910,20 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
&& parser.textLength() == 0) {
value = null;
} else {
- value = parser.textOrNull();
- if (value != null) {
- try {
- numericValue = fieldType().type.parse(parser, coerce.value());
- } catch (IllegalArgumentException e) {
- if (ignoreMalformed.value()) {
- return;
- } else {
- throw e;
- }
+ try {
+ numericValue = fieldType().type.parse(parser, coerce.value());
+ } catch (IllegalArgumentException e) {
+ if (ignoreMalformed.value()) {
+ return;
+ } else {
+ throw e;
}
}
+ if (includeInAll) {
+ value = parser.textOrNull(); // preserve formatting
+ } else {
+ value = numericValue;
+ }
}
if (value == null) {
@@ -967,7 +938,7 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
numericValue = fieldType().type.parse(value);
}
- if (context.includeInAll(includeInAll, this)) {
+ if (includeInAll) {
context.allEntries().addText(fieldType().name(), value.toString(), fieldType().boost());
}
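
The parseCreateField rewrite above computes includeInAll once and uses it to decide which value to keep: the raw token text when the field feeds _all (so the user's formatting survives, e.g. a leading zero), the parsed Number otherwise. A tiny sketch of that selection rule, with parsing simplified to Double.valueOf:

    public class NumberValueSketch {
        static Object chooseValue(String rawText, boolean includeInAll) {
            Number numericValue = Double.valueOf(rawText); // stands in for fieldType().type.parse(...)
            return includeInAll
                ? rawText        // preserve formatting for the _all field
                : numericValue;  // formatting no longer matters
        }

        public static void main(String[] args) {
            System.out.println(chooseValue("05", true));  // 05  (String, as typed)
            System.out.println(chooseValue("05", false)); // 5.0 (parsed Double)
        }
    }
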
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
index 9d6dcafc0c..e843e7170d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
@@ -24,8 +24,8 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.CopyOnWriteHashMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -47,7 +47,7 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenien
/**
*
*/
-public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, Cloneable {
+public class ObjectMapper extends Mapper implements Cloneable {
public static final String CONTENT_TYPE = "object";
public static final String NESTED_CONTENT_TYPE = "nested";
@@ -154,21 +154,22 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}
context.path().remove();
- ObjectMapper objectMapper = createMapper(name, context.path().pathAsText(name), enabled, nested, dynamic, mappers, context.indexSettings());
- objectMapper = objectMapper.includeInAllIfNotSet(includeInAll);
+ ObjectMapper objectMapper = createMapper(name, context.path().pathAsText(name), enabled, nested, dynamic,
+ includeInAll, mappers, context.indexSettings());
return (Y) objectMapper;
}
- protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) {
- return new ObjectMapper(name, fullPath, enabled, nested, dynamic, mappers);
+ protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic,
+ Boolean includeInAll, Map<String, Mapper> mappers, @Nullable Settings settings) {
+ return new ObjectMapper(name, fullPath, enabled, nested, dynamic, includeInAll, mappers, settings);
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
- ObjectMapper.Builder builder = createBuilder(name);
+ ObjectMapper.Builder builder = new Builder(name);
parseNested(name, node, builder);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@@ -300,9 +301,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}
- protected Builder createBuilder(String name) {
- return new Builder(name);
- }
}
private final String fullPath;
@@ -322,12 +320,21 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
private volatile CopyOnWriteHashMap<String, Mapper> mappers;
- ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers) {
+ ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic,
+ Boolean includeInAll, Map<String, Mapper> mappers, Settings settings) {
super(name);
+ assert settings != null;
+ Version indexCreatedVersion = Version.indexCreated(settings);
+ if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_beta1)) {
+ if (name.isEmpty()) {
+ throw new IllegalArgumentException("name cannot be empty string");
+ }
+ }
this.fullPath = fullPath;
this.enabled = enabled;
this.nested = nested;
this.dynamic = dynamic;
+ this.includeInAll = includeInAll;
if (mappers == null) {
this.mappers = new CopyOnWriteHashMap<>();
} else {
@@ -373,68 +380,19 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return mappers.get(field);
}
- @Override
- public ObjectMapper includeInAll(Boolean includeInAll) {
- if (includeInAll == null) {
- return this;
- }
-
- ObjectMapper clone = clone();
- clone.includeInAll = includeInAll;
- // when called from outside, apply this on all the inner mappers
- for (Mapper mapper : clone.mappers.values()) {
- if (mapper instanceof AllFieldMapper.IncludeInAll) {
- clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll));
- }
- }
- return clone;
- }
-
- @Override
- public ObjectMapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll == null || this.includeInAll != null) {
- return this;
- }
-
- ObjectMapper clone = clone();
- clone.includeInAll = includeInAll;
- // when called from outside, apply this on all the inner mappers
- for (Mapper mapper : clone.mappers.values()) {
- if (mapper instanceof AllFieldMapper.IncludeInAll) {
- clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll));
- }
- }
- return clone;
- }
-
- @Override
- public ObjectMapper unsetIncludeInAll() {
- if (includeInAll == null) {
- return this;
- }
- ObjectMapper clone = clone();
- clone.includeInAll = null;
- // when called from outside, apply this on all the inner mappers
- for (Mapper mapper : mappers.values()) {
- if (mapper instanceof AllFieldMapper.IncludeInAll) {
- clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll());
- }
- }
- return clone;
- }
-
public Nested nested() {
return this.nested;
}
+ public Boolean includeInAll() {
+ return includeInAll;
+ }
+
public Query nestedTypeFilter() {
return this.nestedTypeFilter;
}
protected void putMapper(Mapper mapper) {
- if (mapper instanceof AllFieldMapper.IncludeInAll) {
- mapper = ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
- }
mappers = mappers.copyAndPut(mapper.simpleName(), mapper);
}
@@ -477,6 +435,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}
}
+ this.includeInAll = mergeWith.includeInAll;
if (mergeWith.dynamic != null) {
this.dynamic = mergeWith.dynamic;
}
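
ObjectMapper drops the clone-based includeInAll mutators in favor of passing the flag through the constructor, and adds a version gate so only indexes created on or after 5.0.0-beta1 reject empty names. A small sketch of both patterns under assumed, simplified types:

    // Illustrative types only; the real constructor also takes nested/dynamic
    // state, the mapper map, and index Settings.
    final class ObjectNodeSketch {
        private final String name;
        private final Boolean includeInAll; // tri-state: null means "not set"

        ObjectNodeSketch(String name, Boolean includeInAll, boolean indexCreatedOnOrAfter5) {
            // Version-gated validation: only newly created indexes reject empty
            // names, so mappings from older indexes keep working after upgrade.
            if (indexCreatedOnOrAfter5 && name.isEmpty()) {
                throw new IllegalArgumentException("name cannot be empty string");
            }
            this.name = name;
            // Set once at construction time instead of cloning afterwards.
            this.includeInAll = includeInAll;
        }

        Boolean includeInAll() {
            return includeInAll;
        }

        String name() {
            return name;
        }
    }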
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
index 477f88cc53..7ff5c4a37f 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
@@ -29,15 +29,11 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.all.AllEntries;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.analysis.AnalysisService;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-/**
- *
- */
public abstract class ParseContext {
/** Fork of {@link org.apache.lucene.document.Document} with additional functionality. */
@@ -243,11 +239,6 @@ public abstract class ParseContext {
}
@Override
- public AnalysisService analysisService() {
- return in.analysisService();
- }
-
- @Override
public MapperService mapperService() {
return in.mapperService();
}
@@ -386,11 +377,6 @@ public abstract class ParseContext {
}
@Override
- public AnalysisService analysisService() {
- return docMapperParser.analysisService;
- }
-
- @Override
public MapperService mapperService() {
return docMapperParser.mapperService;
}
@@ -423,6 +409,22 @@ public abstract class ParseContext {
public abstract DocumentMapperParser docMapperParser();
+ /** Return a view of this {@link ParseContext} that changes the return
+ * value of {@link #getIncludeInAllDefault()}. */
+ public final ParseContext setIncludeInAllDefault(boolean includeInAll) {
+ return new FilterParseContext(this) {
+ @Override
+ public Boolean getIncludeInAllDefault() {
+ return includeInAll;
+ }
+ };
+ }
+
+ /** Whether field values should be added to the _all field by default. */
+ public Boolean getIncludeInAllDefault() {
+ return null;
+ }
+
/**
* Return a new context that will be within a copy-to operation.
*/
@@ -509,8 +511,6 @@ public abstract class ParseContext {
public abstract DocumentMapper docMapper();
- public abstract AnalysisService analysisService();
-
public abstract MapperService mapperService();
public abstract Field version();
@@ -526,7 +526,7 @@ public abstract class ParseContext {
* is <tt>false</tt>. If it's enabled, it will return <tt>true</tt> only if the specific flag is <tt>null</tt> or
* its actual value (so, if not set, it defaults to "true") and the field is indexed.
*/
- private boolean includeInAll(Boolean specificIncludeInAll, boolean indexed) {
+ private boolean includeInAll(Boolean includeInAll, boolean indexed) {
if (isWithinCopyTo()) {
return false;
}
@@ -536,11 +536,14 @@ public abstract class ParseContext {
if (!docMapper().allFieldMapper().enabled()) {
return false;
}
+ if (includeInAll == null) {
+ includeInAll = getIncludeInAllDefault();
+ }
// not explicitly set
- if (specificIncludeInAll == null) {
+ if (includeInAll == null) {
return indexed;
}
- return specificIncludeInAll;
+ return includeInAll;
}
public abstract AllEntries allEntries();
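
The new setIncludeInAllDefault returns a wrapping view of the context, so an object mapper can hand inner fields a default without mutating shared state; includeInAll then falls back to that default when the field-level flag is null. A compact sketch of the view-plus-fallback idea, with plain classes standing in for ParseContext/FilterParseContext:

    class ContextSketch {
        Boolean getIncludeInAllDefault() {
            return null; // no default unless a wrapping view provides one
        }

        // Returns a view of this context with a different default; nothing on
        // the original context is mutated.
        final ContextSketch withIncludeInAllDefault(boolean includeInAll) {
            return new ContextSketch() {
                @Override
                Boolean getIncludeInAllDefault() {
                    return includeInAll;
                }
            };
        }

        // Mirrors the new fallback chain: field-level flag, then the view's
        // default, then "indexed" as the last resort.
        final boolean resolveIncludeInAll(Boolean specific, boolean indexed) {
            if (specific == null) {
                specific = getIncludeInAllDefault();
            }
            return specific == null ? indexed : specific;
        }
    }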
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
index 81ca392a50..85367e624d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
@@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper;
import org.apache.lucene.document.Field;
+import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.mapper.ParseContext.Document;
@@ -32,7 +33,8 @@ public class ParsedDocument {
private final Field version;
- private final String uid, id, type;
+ private final String id, type;
+ private final BytesRef uid;
private final String routing;
@@ -48,11 +50,12 @@ public class ParsedDocument {
private String parent;
- public ParsedDocument(Field version, String id, String type, String routing, long timestamp, long ttl, List<Document> documents, BytesReference source, Mapping dynamicMappingsUpdate) {
+ public ParsedDocument(Field version, String id, String type, String routing, long timestamp, long ttl, List<Document> documents,
+ BytesReference source, Mapping dynamicMappingsUpdate) {
this.version = version;
this.id = id;
this.type = type;
- this.uid = Uid.createUid(type, id);
+ this.uid = Uid.createUidAsBytes(type, id);
this.routing = routing;
this.timestamp = timestamp;
this.ttl = ttl;
@@ -64,7 +67,7 @@ public class ParsedDocument {
return version;
}
- public String uid() {
+ public BytesRef uid() {
return uid;
}
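
Storing the uid as a BytesRef means the type#id key is encoded once per document instead of on every term lookup. A rough standalone illustration of the idea (the real helper is Uid.createUidAsBytes, which returns a Lucene BytesRef; the '#' delimiter follows Uid's convention):

    import java.nio.charset.StandardCharsets;

    final class UidSketch {
        static byte[] createUidBytes(String type, String id) {
            // Encode "type#id" once per document; term lookups need bytes anyway,
            // so keeping a String here would force a re-encode on every use.
            return (type + "#" + id).getBytes(StandardCharsets.UTF_8);
        }
    }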
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
index f419d525e1..bf343a6e0a 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
@@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper;
import org.elasticsearch.Version;
+import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
@@ -30,14 +31,13 @@ import org.elasticsearch.index.mapper.DynamicTemplate.XContentFieldType;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
+import java.util.Collection;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Set;
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter;
/**
@@ -57,78 +57,43 @@ public class RootObjectMapper extends ObjectMapper {
public static class Builder extends ObjectMapper.Builder<Builder, RootObjectMapper> {
- protected final List<DynamicTemplate> dynamicTemplates = new ArrayList<>();
-
- // we use this to filter out seen date formats, because we might get duplicates during merging
- protected Set<String> seenDateFormats = new HashSet<>();
- protected List<FormatDateTimeFormatter> dynamicDateTimeFormatters = new ArrayList<>();
-
- protected boolean dateDetection = Defaults.DATE_DETECTION;
- protected boolean numericDetection = Defaults.NUMERIC_DETECTION;
+ protected Explicit<DynamicTemplate[]> dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false);
+ protected Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters = new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false);
+ protected Explicit<Boolean> dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false);
+ protected Explicit<Boolean> numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false);
public Builder(String name) {
super(name);
this.builder = this;
}
- public Builder noDynamicDateTimeFormatter() {
- this.dynamicDateTimeFormatters = null;
- return builder;
- }
-
- public Builder dynamicDateTimeFormatter(Iterable<FormatDateTimeFormatter> dateTimeFormatters) {
- for (FormatDateTimeFormatter dateTimeFormatter : dateTimeFormatters) {
- if (!seenDateFormats.contains(dateTimeFormatter.format())) {
- seenDateFormats.add(dateTimeFormatter.format());
- this.dynamicDateTimeFormatters.add(dateTimeFormatter);
- }
- }
- return builder;
- }
-
- public Builder add(DynamicTemplate dynamicTemplate) {
- this.dynamicTemplates.add(dynamicTemplate);
+ public Builder dynamicDateTimeFormatter(Collection<FormatDateTimeFormatter> dateTimeFormatters) {
+ this.dynamicDateTimeFormatters = new Explicit<>(dateTimeFormatters.toArray(new FormatDateTimeFormatter[0]), true);
return this;
}
- public Builder add(DynamicTemplate... dynamicTemplate) {
- for (DynamicTemplate template : dynamicTemplate) {
- this.dynamicTemplates.add(template);
- }
+ public Builder dynamicTemplates(Collection<DynamicTemplate> templates) {
+ this.dynamicTemplates = new Explicit<>(templates.toArray(new DynamicTemplate[0]), true);
return this;
}
-
@Override
- protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) {
+ protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic,
+ Boolean includeInAll, Map<String, Mapper> mappers, @Nullable Settings settings) {
assert !nested.isNested();
- FormatDateTimeFormatter[] dates = null;
- if (dynamicDateTimeFormatters == null) {
- dates = new FormatDateTimeFormatter[0];
- } else if (dynamicDateTimeFormatters.isEmpty()) {
- // add the default one
- dates = Defaults.DYNAMIC_DATE_TIME_FORMATTERS;
- } else {
- dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]);
- }
- return new RootObjectMapper(name, enabled, dynamic, mappers,
- dates,
- dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]),
- dateDetection, numericDetection);
+ return new RootObjectMapper(name, enabled, dynamic, includeInAll, mappers,
+ dynamicDateTimeFormatters,
+ dynamicTemplates,
+ dateDetection, numericDetection, settings);
}
}
public static class TypeParser extends ObjectMapper.TypeParser {
@Override
- protected ObjectMapper.Builder createBuilder(String name) {
- return new Builder(name);
- }
-
- @Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
- ObjectMapper.Builder builder = createBuilder(name);
+ RootObjectMapper.Builder builder = new Builder(name);
Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Object> entry = iterator.next();
@@ -142,26 +107,22 @@ public class RootObjectMapper extends ObjectMapper {
return builder;
}
- protected boolean processField(ObjectMapper.Builder builder, String fieldName, Object fieldNode,
+ protected boolean processField(RootObjectMapper.Builder builder, String fieldName, Object fieldNode,
Version indexVersionCreated) {
if (fieldName.equals("date_formats") || fieldName.equals("dynamic_date_formats")) {
- List<FormatDateTimeFormatter> dateTimeFormatters = new ArrayList<>();
if (fieldNode instanceof List) {
- for (Object node1 : (List) fieldNode) {
- if (node1.toString().startsWith("epoch_")) {
- throw new MapperParsingException("Epoch ["+ node1.toString() +"] is not supported as dynamic date format");
+ List<FormatDateTimeFormatter> formatters = new ArrayList<>();
+ for (Object formatter : (List<?>) fieldNode) {
+ if (formatter.toString().startsWith("epoch_")) {
+ throw new MapperParsingException("Epoch ["+ formatter +"] is not supported as dynamic date format");
}
- dateTimeFormatters.add(parseDateTimeFormatter(node1));
+ formatters.add(parseDateTimeFormatter(formatter));
}
+ builder.dynamicDateTimeFormatter(formatters);
} else if ("none".equals(fieldNode.toString())) {
- dateTimeFormatters = null;
- } else {
- dateTimeFormatters.add(parseDateTimeFormatter(fieldNode));
- }
- if (dateTimeFormatters == null) {
- ((Builder) builder).noDynamicDateTimeFormatter();
+ builder.dynamicDateTimeFormatter(Collections.emptyList());
} else {
- ((Builder) builder).dynamicDateTimeFormatter(dateTimeFormatters);
+ builder.dynamicDateTimeFormatter(Collections.singleton(parseDateTimeFormatter(fieldNode)));
}
return true;
} else if (fieldName.equals("dynamic_templates")) {
@@ -174,7 +135,8 @@ public class RootObjectMapper extends ObjectMapper {
// }
// }
// ]
- List tmplNodes = (List) fieldNode;
+ List<?> tmplNodes = (List<?>) fieldNode;
+ List<DynamicTemplate> templates = new ArrayList<>();
for (Object tmplNode : tmplNodes) {
Map<String, Object> tmpl = (Map<String, Object>) tmplNode;
if (tmpl.size() != 1) {
@@ -185,31 +147,31 @@ public class RootObjectMapper extends ObjectMapper {
Map<String, Object> templateParams = (Map<String, Object>) entry.getValue();
DynamicTemplate template = DynamicTemplate.parse(templateName, templateParams, indexVersionCreated);
if (template != null) {
- ((Builder) builder).add(template);
+ templates.add(template);
}
}
+ builder.dynamicTemplates(templates);
return true;
} else if (fieldName.equals("date_detection")) {
- ((Builder) builder).dateDetection = lenientNodeBooleanValue(fieldNode);
+ ((Builder) builder).dateDetection = new Explicit<>(nodeBooleanValue(fieldNode), true);
return true;
} else if (fieldName.equals("numeric_detection")) {
- ((Builder) builder).numericDetection = lenientNodeBooleanValue(fieldNode);
+ ((Builder) builder).numericDetection = new Explicit<>(nodeBooleanValue(fieldNode), true);
return true;
}
return false;
}
}
- private final FormatDateTimeFormatter[] dynamicDateTimeFormatters;
-
- private final boolean dateDetection;
- private final boolean numericDetection;
+ private Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters;
+ private Explicit<Boolean> dateDetection;
+ private Explicit<Boolean> numericDetection;
+ private Explicit<DynamicTemplate[]> dynamicTemplates;
- private volatile DynamicTemplate dynamicTemplates[];
-
- RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Map<String, Mapper> mappers,
- FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) {
- super(name, name, enabled, Nested.NO, dynamic, mappers);
+ RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Boolean includeInAll, Map<String, Mapper> mappers,
+ Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters, Explicit<DynamicTemplate[]> dynamicTemplates,
+ Explicit<Boolean> dateDetection, Explicit<Boolean> numericDetection, Settings settings) {
+ super(name, name, enabled, Nested.NO, dynamic, includeInAll, mappers, settings);
this.dynamicTemplates = dynamicTemplates;
this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;
this.dateDetection = dateDetection;
@@ -219,21 +181,26 @@ public class RootObjectMapper extends ObjectMapper {
@Override
public ObjectMapper mappingUpdate(Mapper mapper) {
RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper);
- // dynamic templates are irrelevant for dynamic mappings updates
- update.dynamicTemplates = new DynamicTemplate[0];
+ // for dynamic updates, no need to carry root-specific options, we just
+        // set everything to their implicit default value so that they are not
+ // applied at merge time
+ update.dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false);
+ update.dynamicDateTimeFormatters = new Explicit<FormatDateTimeFormatter[]>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false);
+ update.dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false);
+ update.numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false);
return update;
}
public boolean dateDetection() {
- return this.dateDetection;
+ return this.dateDetection.value();
}
public boolean numericDetection() {
- return this.numericDetection;
+ return this.numericDetection.value();
}
public FormatDateTimeFormatter[] dynamicDateTimeFormatters() {
- return dynamicDateTimeFormatters;
+ return dynamicDateTimeFormatters.value();
}
public Mapper.Builder findTemplateBuilder(ParseContext context, String name, XContentFieldType matchType) {
@@ -263,7 +230,7 @@ public class RootObjectMapper extends ObjectMapper {
public DynamicTemplate findTemplate(ContentPath path, String name, XContentFieldType matchType) {
final String pathAsString = path.pathAsText(name);
- for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
+ for (DynamicTemplate dynamicTemplate : dynamicTemplates.value()) {
if (dynamicTemplate.match(pathAsString, name, matchType)) {
return dynamicTemplate;
}
@@ -280,21 +247,18 @@ public class RootObjectMapper extends ObjectMapper {
protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
- // merge them
- List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
- for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
- boolean replaced = false;
- for (int i = 0; i < mergedTemplates.size(); i++) {
- if (mergedTemplates.get(i).name().equals(template.name())) {
- mergedTemplates.set(i, template);
- replaced = true;
- }
- }
- if (!replaced) {
- mergedTemplates.add(template);
- }
+ if (mergeWithObject.numericDetection.explicit()) {
+ this.numericDetection = mergeWithObject.numericDetection;
+ }
+ if (mergeWithObject.dateDetection.explicit()) {
+ this.dateDetection = mergeWithObject.dateDetection;
+ }
+ if (mergeWithObject.dynamicDateTimeFormatters.explicit()) {
+ this.dynamicDateTimeFormatters = mergeWithObject.dynamicDateTimeFormatters;
+ }
+ if (mergeWithObject.dynamicTemplates.explicit()) {
+ this.dynamicTemplates = mergeWithObject.dynamicTemplates;
}
- this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
}
@Override
@@ -304,19 +268,19 @@ public class RootObjectMapper extends ObjectMapper {
@Override
protected void doXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
- if (dynamicDateTimeFormatters != Defaults.DYNAMIC_DATE_TIME_FORMATTERS) {
- if (dynamicDateTimeFormatters.length > 0) {
- builder.startArray("dynamic_date_formats");
- for (FormatDateTimeFormatter dateTimeFormatter : dynamicDateTimeFormatters) {
- builder.value(dateTimeFormatter.format());
- }
- builder.endArray();
+ final boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ if (dynamicDateTimeFormatters.explicit() || includeDefaults) {
+ builder.startArray("dynamic_date_formats");
+ for (FormatDateTimeFormatter dateTimeFormatter : dynamicDateTimeFormatters.value()) {
+ builder.value(dateTimeFormatter.format());
}
+ builder.endArray();
}
- if (dynamicTemplates != null && dynamicTemplates.length > 0) {
+ if (dynamicTemplates.explicit() || includeDefaults) {
builder.startArray("dynamic_templates");
- for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
+ for (DynamicTemplate dynamicTemplate : dynamicTemplates.value()) {
builder.startObject();
builder.field(dynamicTemplate.name(), dynamicTemplate);
builder.endObject();
@@ -324,11 +288,11 @@ public class RootObjectMapper extends ObjectMapper {
builder.endArray();
}
- if (dateDetection != Defaults.DATE_DETECTION) {
- builder.field("date_detection", dateDetection);
+ if (dateDetection.explicit() || includeDefaults) {
+ builder.field("date_detection", dateDetection.value());
}
- if (numericDetection != Defaults.NUMERIC_DETECTION) {
- builder.field("numeric_detection", numericDetection);
+ if (numericDetection.explicit() || includeDefaults) {
+ builder.field("numeric_detection", numericDetection.value());
}
}
}
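
RootObjectMapper's root options move from raw fields to Explicit<T>, which pairs a value with a flag recording whether the user set it; doMerge then only copies explicit values and doXContent only serializes them unless include_defaults is requested. A minimal re-sketch of the wrapper and its merge rule:

    // A minimal sketch of the idea behind org.elasticsearch.common.Explicit.
    final class ExplicitSketch<T> {
        private final T value;
        private final boolean explicit; // true only if the user set this value

        ExplicitSketch(T value, boolean explicit) {
            this.value = value;
            this.explicit = explicit;
        }

        T value() { return value; }
        boolean explicit() { return explicit; }

        // The merge rule used above: an incoming explicit value wins,
        // otherwise the current value (explicit or default) is kept, so
        // defaults can never clobber user-provided settings.
        ExplicitSketch<T> merge(ExplicitSketch<T> incoming) {
            return incoming.explicit() ? incoming : this;
        }

        // The serialization rule: only write explicit values, unless the
        // caller asked for defaults too (include_defaults).
        boolean shouldSerialize(boolean includeDefaults) {
            return explicit || includeDefaults;
        }
    }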
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
index 8c50e86802..3608da30f7 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
@@ -65,7 +65,7 @@ import java.util.Map;
/** A {@link FieldMapper} for scaled floats. Values are internally multiplied
* by a scaling factor and rounded to the closest long. */
-public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public class ScaledFloatFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "scaled_float";
// use the same default as numbers
@@ -124,10 +124,8 @@ public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMappe
throw new IllegalArgumentException("Field [" + name + "] misses required parameter [scaling_factor]");
}
setupFieldType(context);
- ScaledFloatFieldMapper fieldMapper =
- new ScaledFloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
- coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return (ScaledFloatFieldMapper) fieldMapper.includeInAll(includeInAll);
+ return new ScaledFloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
+ coerce(context), includeInAll, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
}
@@ -246,7 +244,7 @@ public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMappe
lo = Math.round(Math.ceil(dValue * scalingFactor));
}
Long hi = null;
- if (lowerTerm != null) {
+ if (upperTerm != null) {
double dValue = NumberFieldMapper.NumberType.DOUBLE.parse(upperTerm).doubleValue();
if (includeUpper == false) {
dValue = Math.nextDown(dValue);
@@ -336,6 +334,7 @@ public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMappe
MappedFieldType defaultFieldType,
Explicit<Boolean> ignoreMalformed,
Explicit<Boolean> coerce,
+ Boolean includeInAll,
Settings indexSettings,
MultiFields multiFields,
CopyTo copyTo) {
@@ -346,6 +345,7 @@ public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMappe
}
this.ignoreMalformed = ignoreMalformed;
this.coerce = coerce;
+ this.includeInAll = includeInAll;
}
@Override
@@ -364,40 +364,9 @@ public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMappe
}
@Override
- public Mapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- ScaledFloatFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- ScaledFloatFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public Mapper unsetIncludeInAll() {
- if (includeInAll != null) {
- ScaledFloatFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ final boolean includeInAll = context.includeInAll(this.includeInAll, this);
+
XContentParser parser = context.parser();
Object value;
Number numericValue = null;
@@ -410,18 +379,20 @@ public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMappe
&& parser.textLength() == 0) {
value = null;
} else {
- value = parser.textOrNull();
- if (value != null) {
- try {
- numericValue = NumberFieldMapper.NumberType.DOUBLE.parse(parser, coerce.value());
- } catch (IllegalArgumentException e) {
- if (ignoreMalformed.value()) {
- return;
- } else {
- throw e;
- }
+ try {
+ numericValue = NumberFieldMapper.NumberType.DOUBLE.parse(parser, coerce.value());
+ } catch (IllegalArgumentException e) {
+ if (ignoreMalformed.value()) {
+ return;
+ } else {
+ throw e;
}
}
+ if (includeInAll) {
+ value = parser.textOrNull(); // preserve formatting
+ } else {
+ value = numericValue;
+ }
}
if (value == null) {
@@ -436,7 +407,7 @@ public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMappe
numericValue = NumberFieldMapper.NumberType.DOUBLE.parse(value);
}
- if (context.includeInAll(includeInAll, this)) {
+ if (includeInAll) {
context.allEntries().addText(fieldType().name(), value.toString(), fieldType().boost());
}
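
Besides mirroring the NumberFieldMapper include-in-all change, this file's hunks fix a copy-paste bug: the upper bound of a scaled-float range query was guarded by `lowerTerm != null`. A sketch of the intended bound math, with exclusive bounds stepped one ulp inward before scaling and rounding:

    // Sketch of the scaled-float range bounds; a null term means "unbounded".
    final class ScaledRangeSketch {
        static Long lowerBound(Double lower, boolean includeLower, double scalingFactor) {
            if (lower == null) {
                return null;
            }
            double d = includeLower ? lower : Math.nextUp(lower); // exclusive: step inward
            return Math.round(Math.ceil(d * scalingFactor));
        }

        static Long upperBound(Double upper, boolean includeUpper, double scalingFactor) {
            if (upper == null) { // the fixed line: this must test the *upper* term
                return null;
            }
            double d = includeUpper ? upper : Math.nextDown(upper); // exclusive: step inward
            return Math.round(Math.floor(d * scalingFactor));
        }
    }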
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
index 4854eb5775..63d4d958b3 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
@@ -275,15 +275,15 @@ public class SourceFieldMapper extends MetadataFieldMapper {
}
if (includes != null) {
- builder.field("includes", includes);
+ builder.array("includes", includes);
} else if (includeDefaults) {
- builder.field("includes", Strings.EMPTY_ARRAY);
+ builder.array("includes", Strings.EMPTY_ARRAY);
}
if (excludes != null) {
- builder.field("excludes", excludes);
+ builder.array("excludes", excludes);
} else if (includeDefaults) {
- builder.field("excludes", Strings.EMPTY_ARRAY);
+ builder.array("excludes", Strings.EMPTY_ARRAY);
}
builder.endObject();
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java
index 887a40fe70..d290ef0fb5 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldMapper.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.mapper;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
@@ -26,7 +27,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -49,7 +49,7 @@ import java.util.Set;
import static org.apache.lucene.index.IndexOptions.NONE;
import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;
-public class StringFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public class StringFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "string";
private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1;
@@ -177,10 +177,9 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(), positionIncrementGap));
}
setupFieldType(context);
- StringFieldMapper fieldMapper = new StringFieldMapper(
- name, fieldType(), defaultFieldType, positionIncrementGap, ignoreAbove,
+ return new StringFieldMapper(
+ name, fieldType(), defaultFieldType, positionIncrementGap, ignoreAbove, includeInAll,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return fieldMapper.includeInAll(includeInAll);
}
}
@@ -188,7 +187,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
private final DeprecationLogger deprecationLogger;
public TypeParser() {
- ESLogger logger = Loggers.getLogger(getClass());
+ Logger logger = Loggers.getLogger(getClass());
this.deprecationLogger = new DeprecationLogger(logger);
}
@@ -319,13 +318,13 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
// we need to update to actual analyzers if they are not set in this case...
// so we can inject the position increment gap...
if (builder.fieldType().indexAnalyzer() == null) {
- builder.fieldType().setIndexAnalyzer(parserContext.analysisService().defaultIndexAnalyzer());
+ builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
}
if (builder.fieldType().searchAnalyzer() == null) {
- builder.fieldType().setSearchAnalyzer(parserContext.analysisService().defaultSearchAnalyzer());
+ builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
}
if (builder.fieldType().searchQuoteAnalyzer() == null) {
- builder.fieldType().setSearchQuoteAnalyzer(parserContext.analysisService().defaultSearchQuoteAnalyzer());
+ builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
}
iterator.remove();
} else if (propName.equals("ignore_above")) {
@@ -488,7 +487,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
private int ignoreAbove;
protected StringFieldMapper(String simpleName, StringFieldType fieldType, MappedFieldType defaultFieldType,
- int positionIncrementGap, int ignoreAbove,
+ int positionIncrementGap, int ignoreAbove, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0_alpha1)) {
@@ -506,6 +505,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
}
this.positionIncrementGap = positionIncrementGap;
this.ignoreAbove = ignoreAbove;
+ this.includeInAll = includeInAll;
}
@Override
@@ -514,39 +514,6 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
}
@Override
- public StringFieldMapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- StringFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- StringFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public StringFieldMapper unsetIncludeInAll() {
- if (includeInAll != null) {
- StringFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
protected boolean customBoost() {
return true;
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
index 987586db72..63febfcaf0 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
+import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
@@ -30,19 +31,31 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.Set;
+import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;
/** A {@link FieldMapper} for full-text fields. */
-public class TextFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
+public class TextFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "text";
private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1;
+ private static final List<String> SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING = unmodifiableList(Arrays.asList(
+ "type",
+ // common text parameters, for which the upgrade is straightforward
+ "index", "store", "doc_values", "omit_norms", "norms", "boost", "fields", "copy_to",
+ "fielddata", "eager_global_ordinals", "fielddata_frequency_filter", "include_in_all",
+ "analyzer", "search_analyzer", "search_quote_analyzer",
+ "index_options", "position_increment_gap", "similarity"));
+
public static class Defaults {
public static double FIELDDATA_MIN_FREQUENCY = 0;
public static double FIELDDATA_MAX_FREQUENCY = Integer.MAX_VALUE;
@@ -120,20 +133,54 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(), positionIncrementGap));
}
setupFieldType(context);
- TextFieldMapper fieldMapper = new TextFieldMapper(
- name, fieldType, defaultFieldType, positionIncrementGap,
+ return new TextFieldMapper(
+ name, fieldType, defaultFieldType, positionIncrementGap, includeInAll,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- return fieldMapper.includeInAll(includeInAll);
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha1)) {
+ // Downgrade "text" to "string" in indexes created in 2.x so you can use modern syntax against old indexes
+ Set<String> unsupportedParameters = new HashSet<>(node.keySet());
+ unsupportedParameters.removeAll(SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING);
+ if (false == SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING.containsAll(node.keySet())) {
+ throw new IllegalArgumentException("Automatic downgrade from [text] to [string] failed because parameters "
+ + unsupportedParameters + " are not supported for automatic downgrades.");
+ }
+ { // Downgrade "index"
+ Object index = node.get("index");
+ if (index == null || Boolean.TRUE.equals(index)) {
+ index = "analyzed";
+ } else if (Boolean.FALSE.equals(index)) {
+ index = "no";
+ } else {
+ throw new IllegalArgumentException(
+ "Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]");
+ }
+ node.put("index", index);
+ }
+ { // Downgrade "fielddata" (default in string is true, default in text is false)
+ Object fielddata = node.get("fielddata");
+ if (fielddata == null || Boolean.FALSE.equals(fielddata)) {
+ fielddata = false;
+ } else if (Boolean.TRUE.equals(fielddata)) {
+ fielddata = true;
+ } else {
+ throw new IllegalArgumentException("can't parse [fielddata] value for [" + fielddata + "] for field ["
+ + fieldName + "], expected [true] or [false]");
+ }
+ node.put("fielddata", fielddata);
+ }
+
+ return new StringFieldMapper.TypeParser().parse(fieldName, node, parserContext);
+ }
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(fieldName);
- builder.fieldType().setIndexAnalyzer(parserContext.analysisService().defaultIndexAnalyzer());
- builder.fieldType().setSearchAnalyzer(parserContext.analysisService().defaultSearchAnalyzer());
- builder.fieldType().setSearchQuoteAnalyzer(parserContext.analysisService().defaultSearchQuoteAnalyzer());
+ builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
+ builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
+ builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
parseTextField(builder, fieldName, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@@ -297,7 +344,7 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
private int positionIncrementGap;
protected TextFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- int positionIncrementGap,
+ int positionIncrementGap, Boolean includeInAll,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
assert fieldType.tokenized();
@@ -306,6 +353,7 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
throw new IllegalArgumentException("Cannot enable fielddata on a [text] field that is not indexed: [" + name() + "]");
}
this.positionIncrementGap = positionIncrementGap;
+ this.includeInAll = includeInAll;
}
@Override
@@ -318,39 +366,6 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
return includeInAll;
}
- @Override
- public TextFieldMapper includeInAll(Boolean includeInAll) {
- if (includeInAll != null) {
- TextFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public TextFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
- if (includeInAll != null && this.includeInAll == null) {
- TextFieldMapper clone = clone();
- clone.includeInAll = includeInAll;
- return clone;
- } else {
- return this;
- }
- }
-
- @Override
- public TextFieldMapper unsetIncludeInAll() {
- if (includeInAll != null) {
- TextFieldMapper clone = clone();
- clone.includeInAll = null;
- return clone;
- } else {
- return this;
- }
- }
-
public int getPositionIncrementGap() {
return this.positionIncrementGap;
}
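
The TextFieldMapper parser gains an auto-downgrade path: on pre-5.0 indexes, a text mapping is rewritten into an equivalent string mapping, but only when every parameter is on a known-safe whitelist, and value conventions that differ between the two types (such as "index") are translated. A simplified sketch with a truncated whitelist; the real parser also rewrites "fielddata" and then delegates to StringFieldMapper.TypeParser:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    final class DowngradeSketch {
        private static final Set<String> SUPPORTED =
                new HashSet<>(Arrays.asList("type", "index", "store", "boost"));

        static void downgrade(String fieldName, Map<String, Object> node) {
            Set<String> unsupported = new HashSet<>(node.keySet());
            unsupported.removeAll(SUPPORTED);
            if (!unsupported.isEmpty()) {
                // Refuse rather than silently drop parameters the old type
                // cannot express.
                throw new IllegalArgumentException("Automatic downgrade failed because parameters "
                        + unsupported + " are not supported for automatic downgrades.");
            }
            // Translate the boolean "index" of text into the old tri-state form.
            Object index = node.get("index");
            if (index == null || Boolean.TRUE.equals(index)) {
                node.put("index", "analyzed");
            } else if (Boolean.FALSE.equals(index)) {
                node.put("index", "no");
            } else {
                throw new IllegalArgumentException(
                        "Can't parse [index] value [" + index + "] for field [" + fieldName + "]");
            }
        }
    }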
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java
index daa36664f9..3b6026d1b2 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java
@@ -89,7 +89,7 @@ public class TokenCountFieldMapper extends FieldMapper {
builder.nullValue(nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("analyzer")) {
- NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
@@ -182,9 +182,4 @@ public class TokenCountFieldMapper extends FieldMapper {
builder.field("analyzer", analyzer());
}
- @Override
- public boolean isGenerated() {
- return true;
- }
-
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java
index e388c8ea57..f192efc24a 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java
@@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.logging.DeprecationLogger;
@@ -31,7 +30,6 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.similarity.SimilarityProvider;
-import org.elasticsearch.index.similarity.SimilarityService;
import java.util.Arrays;
import java.util.Collections;
@@ -127,21 +125,21 @@ public class TypeParsers {
builder.storeTermVectorPayloads(nodeBooleanValue("store_term_vector_payloads", propNode, parserContext));
iterator.remove();
} else if (propName.equals("analyzer")) {
- NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
indexAnalyzer = analyzer;
iterator.remove();
} else if (propName.equals("search_analyzer")) {
- NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
searchAnalyzer = analyzer;
iterator.remove();
} else if (propName.equals("search_quote_analyzer")) {
- NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
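
TypeParsers repeats the same lookup-and-validate step for analyzer, search_analyzer, and search_quote_analyzer after the switch from AnalysisService to the index analyzers registry. A generic sketch of that step, with a plain Map standing in for the registry:

    import java.util.Map;

    final class AnalyzerLookupSketch {
        static <A> A requireAnalyzer(Map<String, A> indexAnalyzers, String analyzerName, String field) {
            A analyzer = indexAnalyzers.get(analyzerName);
            if (analyzer == null) {
                // A missing analyzer is a mapping error, reported with the field name.
                throw new IllegalArgumentException(
                        "analyzer [" + analyzerName + "] not found for field [" + field + "]");
            }
            return analyzer;
        }
    }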
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java
index c66044bdb3..eb46f7a21d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java
@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -47,6 +48,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
static {
FIELD_TYPE.setName(NAME);
FIELD_TYPE.setDocValuesType(DocValuesType.NUMERIC);
+ FIELD_TYPE.setIndexOptions(IndexOptions.NONE);
FIELD_TYPE.setHasDocValues(true);
FIELD_TYPE.freeze();
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
index 7b375f125c..8e87782343 100644
--- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
@@ -338,10 +338,6 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
default:
throw new ParsingException(parser.getTokenLocation(), "[bool] query does not support [" + currentFieldName + "]");
}
- if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
- throw new ParsingException(parser.getTokenLocation(),
- "expected [END_OBJECT] but got [{}], possibly too many query clauses", parser.currentToken());
- }
} else if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
switch (currentFieldName) {
diff --git a/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java
index 21328ff8fc..3bd7a8abc1 100644
--- a/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java
@@ -383,7 +383,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
analyzerObj = context.getMapperService().searchAnalyzer();
}
} else {
- analyzerObj = context.getMapperService().analysisService().analyzer(analyzer);
+ analyzerObj = context.getMapperService().getIndexAnalyzers().get(analyzer);
if (analyzerObj == null) {
throw new QueryShardException(context, "[common] analyzer [" + analyzer + "] not found");
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java
index a884c5aea1..fe5d566412 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.query;
+import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
@@ -38,10 +39,11 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
+import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper.LegacyGeoPointFieldType;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.search.geo.InMemoryGeoBoundingBoxQuery;
-import org.elasticsearch.index.search.geo.IndexedGeoBoundingBoxQuery;
+import org.elasticsearch.index.search.geo.LegacyInMemoryGeoBoundingBoxQuery;
+import org.elasticsearch.index.search.geo.LegacyIndexedGeoBoundingBoxQuery;
import java.io.IOException;
import java.util.Objects;
@@ -359,7 +361,10 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
}
}
- if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
+ if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ return LatLonPoint.newBoxQuery(fieldType.name(), luceneBottomRight.getLat(), luceneTopLeft.getLat(),
+ luceneTopLeft.getLon(), luceneBottomRight.getLon());
+ } else if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
@@ -371,12 +376,12 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
Query query;
switch(type) {
case INDEXED:
- GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
- query = IndexedGeoBoundingBoxQuery.create(luceneTopLeft, luceneBottomRight, geoFieldType);
+ LegacyGeoPointFieldType geoFieldType = ((LegacyGeoPointFieldType) fieldType);
+ query = LegacyIndexedGeoBoundingBoxQuery.create(luceneTopLeft, luceneBottomRight, geoFieldType);
break;
case MEMORY:
IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
- query = new InMemoryGeoBoundingBoxQuery(luceneTopLeft, luceneBottomRight, indexFieldData);
+ query = new LegacyInMemoryGeoBoundingBoxQuery(luceneTopLeft, luceneBottomRight, indexFieldData);
break;
default:
// Someone extended the type enum w/o adjusting this switch statement.
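
The bounding-box query now dispatches on the index-created version: indexes new enough for LatLonPoint use LatLonPoint.newBoxQuery, 2.3+ indexes use prefix-encoded postings, 2.2 uses numeric-encoded postings, and anything older keeps the indexed/in-memory legacy paths. A sketch of the dispatch with placeholder version numbers (the real check compares Version constants):

    final class GeoQueryDispatchSketch {
        enum Strategy { LAT_LON_POINT, POSTINGS_PREFIX, POSTINGS_NUMERIC, LEGACY_INDEXED_OR_MEMORY }

        static Strategy pick(long indexCreated, long latLonVersion, long v2_3, long v2_2) {
            if (indexCreated >= latLonVersion) {
                return Strategy.LAT_LON_POINT;          // LatLonPoint.newBoxQuery(...)
            } else if (indexCreated >= v2_3) {
                return Strategy.POSTINGS_PREFIX;        // prefix-encoded postings
            } else if (indexCreated >= v2_2) {
                return Strategy.POSTINGS_NUMERIC;       // numeric-encoded postings
            }
            return Strategy.LEGACY_INDEXED_OR_MEMORY;   // pre-2.2 indexed/in-memory bbox
        }
    }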
diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java
index 29b621e953..add2799b63 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.query;
+import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
@@ -38,7 +39,8 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
+import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
@@ -63,6 +65,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
/** Default for geo distance computation. */
public static final GeoDistance DEFAULT_GEO_DISTANCE = GeoDistance.DEFAULT;
/** Default for optimising the query through a precomputed bounding box query. */
+ @Deprecated
public static final String DEFAULT_OPTIMIZE_BBOX = "memory";
/**
@@ -75,7 +78,9 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
.withAllDeprecated("use validation_method instead");
private static final ParseField COERCE_FIELD = new ParseField("coerce", "normalize")
.withAllDeprecated("use validation_method instead");
- private static final ParseField OPTIMIZE_BBOX_FIELD = new ParseField("optimize_bbox");
+ @Deprecated
+ private static final ParseField OPTIMIZE_BBOX_FIELD = new ParseField("optimize_bbox")
+ .withAllDeprecated("no replacement: `optimize_bbox` is no longer supported due to recent improvements");
private static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type");
private static final ParseField UNIT_FIELD = new ParseField("unit");
private static final ParseField DISTANCE_FIELD = new ParseField("distance");
@@ -89,7 +94,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
/** Algorithm to use for distance computation. */
private GeoDistance geoDistance = DEFAULT_GEO_DISTANCE;
/** Whether or not to use a bbox for pre-filtering. TODO change to enum? */
- private String optimizeBbox = DEFAULT_OPTIMIZE_BBOX;
+ private String optimizeBbox = null;
/** How strict should geo coordinate validation be? */
private GeoValidationMethod validationMethod = GeoValidationMethod.DEFAULT;
@@ -115,7 +120,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
distance = in.readDouble();
validationMethod = GeoValidationMethod.readFromStream(in);
center = in.readGeoPoint();
- optimizeBbox = in.readString();
+ optimizeBbox = in.readOptionalString();
geoDistance = GeoDistance.readFromStream(in);
ignoreUnmapped = in.readBoolean();
}
@@ -126,7 +131,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
out.writeDouble(distance);
validationMethod.writeTo(out);
out.writeGeoPoint(center);
- out.writeString(optimizeBbox);
+ out.writeOptionalString(optimizeBbox);
geoDistance.writeTo(out);
out.writeBoolean(ignoreUnmapped);
}
@@ -220,26 +225,20 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
* Set this to memory or indexed if before running the distance
* calculation you want to limit the candidates to hits in the
* enclosing bounding box.
+ * @deprecated
**/
+ @Deprecated
public GeoDistanceQueryBuilder optimizeBbox(String optimizeBbox) {
- if (optimizeBbox == null) {
- throw new IllegalArgumentException("optimizeBbox must not be null");
- }
- switch (optimizeBbox) {
- case "none":
- case "memory":
- case "indexed":
- break;
- default:
- throw new IllegalArgumentException("optimizeBbox must be one of [none, memory, indexed]");
- }
this.optimizeBbox = optimizeBbox;
return this;
}
/**
* Returns whether or not to run a BoundingBox query prior to
- * distance query for optimization purposes.*/
+     * the distance query for optimization purposes.
+ * @deprecated
+ **/
+ @Deprecated
public String optimizeBbox() {
return this.optimizeBbox;
}
@@ -300,11 +299,14 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
double normDistance = geoDistance.normalize(this.distance, DistanceUnit.DEFAULT);
- if (indexVersionCreated.before(Version.V_2_2_0)) {
- GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
+ if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ return LatLonPoint.newDistanceQuery(fieldType.name(), center.lat(), center.lon(), normDistance);
+ } else if (indexVersionCreated.before(Version.V_2_2_0)) {
+ LegacyGeoPointFieldMapper.LegacyGeoPointFieldType geoFieldType = (LegacyGeoPointFieldMapper.LegacyGeoPointFieldType) fieldType;
IndexGeoPointFieldData indexFieldData = shardContext.getForField(fieldType);
+ String bboxOptimization = Strings.isEmpty(optimizeBbox) ? DEFAULT_OPTIMIZE_BBOX : optimizeBbox;
return new GeoDistanceRangeQuery(center, null, normDistance, true, false, geoDistance,
- geoFieldType, indexFieldData, optimizeBbox);
+ geoFieldType, indexFieldData, bboxOptimization);
}
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
@@ -324,7 +326,9 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
builder.startArray(fieldName).value(center.lon()).value(center.lat()).endArray();
builder.field(DISTANCE_FIELD.getPreferredName(), distance);
builder.field(DISTANCE_TYPE_FIELD.getPreferredName(), geoDistance.name().toLowerCase(Locale.ROOT));
- builder.field(OPTIMIZE_BBOX_FIELD.getPreferredName(), optimizeBbox);
+ if (Strings.isEmpty(optimizeBbox) == false) {
+ builder.field(OPTIMIZE_BBOX_FIELD.getPreferredName(), optimizeBbox);
+ }
builder.field(VALIDATION_METHOD_FIELD.getPreferredName(), validationMethod);
builder.field(IGNORE_UNMAPPED_FIELD.getPreferredName(), ignoreUnmapped);
printBoostAndQueryName(builder);
@@ -344,7 +348,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
Object vDistance = null;
DistanceUnit unit = GeoDistanceQueryBuilder.DEFAULT_DISTANCE_UNIT;
GeoDistance geoDistance = GeoDistanceQueryBuilder.DEFAULT_GEO_DISTANCE;
- String optimizeBbox = GeoDistanceQueryBuilder.DEFAULT_OPTIMIZE_BBOX;
+ String optimizeBbox = null;
boolean coerce = GeoValidationMethod.DEFAULT_LENIENT_PARSING;
boolean ignoreMalformed = GeoValidationMethod.DEFAULT_LENIENT_PARSING;
GeoValidationMethod validationMethod = null;
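
The readString/writeString pair above becomes readOptionalString/writeOptionalString so that an unset
optimize_bbox survives serialization as null. A minimal sketch of the presence-flag framing such optional
fields imply, using plain JDK streams rather than Elasticsearch's StreamInput/StreamOutput (the
flag-then-payload layout is an assumption for illustration):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class OptionalStringCodec {
        // A boolean presence flag precedes the payload so null round-trips.
        static void write(DataOutputStream out, String value) throws IOException {
            out.writeBoolean(value != null);
            if (value != null) {
                out.writeUTF(value);
            }
        }

        static String read(DataInputStream in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }
    }
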
diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java
index d4142e12b3..f26c532610 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java
@@ -37,8 +37,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
+import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper.LegacyGeoPointFieldType;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
@@ -54,6 +55,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
public static final boolean DEFAULT_INCLUDE_UPPER = true;
public static final GeoDistance DEFAULT_GEO_DISTANCE = GeoDistance.DEFAULT;
public static final DistanceUnit DEFAULT_UNIT = DistanceUnit.DEFAULT;
+ @Deprecated
public static final String DEFAULT_OPTIMIZE_BBOX = "memory";
/**
@@ -73,7 +75,9 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
private static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type");
private static final ParseField NAME_FIELD = new ParseField("_name");
private static final ParseField BOOST_FIELD = new ParseField("boost");
- private static final ParseField OPTIMIZE_BBOX_FIELD = new ParseField("optimize_bbox");
+ @Deprecated
+ private static final ParseField OPTIMIZE_BBOX_FIELD = new ParseField("optimize_bbox")
+ .withAllDeprecated("no replacement: `optimize_bbox` is no longer supported due to recent improvements");
private static final ParseField COERCE_FIELD = new ParseField("coerce", "normalize")
.withAllDeprecated("use validation_method instead");
private static final ParseField IGNORE_MALFORMED_FIELD = new ParseField("ignore_malformed")
@@ -96,7 +100,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
private DistanceUnit unit = DEFAULT_UNIT;
- private String optimizeBbox = DEFAULT_OPTIMIZE_BBOX;
+ private String optimizeBbox = null;
private GeoValidationMethod validationMethod = GeoValidationMethod.DEFAULT;
@@ -132,7 +136,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
includeUpper = in.readBoolean();
unit = DistanceUnit.valueOf(in.readString());
geoDistance = GeoDistance.readFromStream(in);
- optimizeBbox = in.readString();
+ optimizeBbox = in.readOptionalString();
validationMethod = GeoValidationMethod.readFromStream(in);
ignoreUnmapped = in.readBoolean();
}
@@ -147,7 +151,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
out.writeBoolean(includeUpper);
out.writeString(unit.name());
geoDistance.writeTo(out);
- out.writeString(optimizeBbox);
+ out.writeOptionalString(optimizeBbox);
validationMethod.writeTo(out);
out.writeBoolean(ignoreUnmapped);
}
@@ -242,22 +246,13 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
return unit;
}
+ @Deprecated
public GeoDistanceRangeQueryBuilder optimizeBbox(String optimizeBbox) {
- if (optimizeBbox == null) {
- throw new IllegalArgumentException("optimizeBbox must not be null");
- }
- switch (optimizeBbox) {
- case "none":
- case "memory":
- case "indexed":
- break;
- default:
- throw new IllegalArgumentException("optimizeBbox must be one of [none, memory, indexed]");
- }
this.optimizeBbox = optimizeBbox;
return this;
}
+ @Deprecated
public String optimizeBbox() {
return optimizeBbox;
}
@@ -353,11 +348,15 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
}
final Version indexVersionCreated = context.indexVersionCreated();
- if (indexVersionCreated.before(Version.V_2_2_0)) {
- GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
+ if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ throw new QueryShardException(context, "[{}] queries are no longer supported for geo_point field types. "
+ + "Use geo_distance sort or aggregations", NAME);
+ } else if (indexVersionCreated.before(Version.V_2_2_0)) {
+ LegacyGeoPointFieldType geoFieldType = (LegacyGeoPointFieldType) fieldType;
IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
+ String bboxOptimization = Strings.isEmpty(optimizeBbox) ? DEFAULT_OPTIMIZE_BBOX : optimizeBbox;
return new GeoDistanceRangeQuery(point, fromValue, toValue, includeLower, includeUpper, geoDistance, geoFieldType,
- indexFieldData, optimizeBbox);
+ indexFieldData, bboxOptimization);
}
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
@@ -380,7 +379,9 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
builder.field(INCLUDE_UPPER_FIELD.getPreferredName(), includeUpper);
builder.field(UNIT_FIELD.getPreferredName(), unit);
builder.field(DISTANCE_TYPE_FIELD.getPreferredName(), geoDistance.name().toLowerCase(Locale.ROOT));
- builder.field(OPTIMIZE_BBOX_FIELD.getPreferredName(), optimizeBbox);
+ if (Strings.isEmpty(optimizeBbox) == false) {
+ builder.field(OPTIMIZE_BBOX_FIELD.getPreferredName(), optimizeBbox);
+ }
builder.field(VALIDATION_METHOD.getPreferredName(), validationMethod);
builder.field(IGNORE_UNMAPPED_FIELD.getPreferredName(), ignoreUnmapped);
printBoostAndQueryName(builder);
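
As in GeoDistanceQueryBuilder above, the legacy (pre-2.2) branch keeps its historical behavior even though the
field now defaults to null: an unset optimize_bbox falls back to the old default right before the legacy query
is built. A reduced sketch of that fallback, with "memory" taken from DEFAULT_OPTIMIZE_BBOX:

    final class BboxFallback {
        static final String DEFAULT_OPTIMIZE_BBOX = "memory";

        // Null or empty means "not set by the user"; legacy indexes then get
        // the historical default instead of an error.
        static String bboxOptimization(String requested) {
            return (requested == null || requested.isEmpty()) ? DEFAULT_OPTIMIZE_BBOX : requested;
        }
    }
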
diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java
index 65ce33c1c9..e5e8e69fd5 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java
@@ -19,6 +19,8 @@
package org.elasticsearch.index.query;
+import org.apache.lucene.document.LatLonPoint;
+import org.apache.lucene.geo.Polygon;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
@@ -36,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.geo.GeoPolygonQuery;
@@ -210,10 +213,14 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
double[] lons = new double[shellSize];
GeoPoint p;
for (int i=0; i<shellSize; ++i) {
- p = new GeoPoint(shell.get(i));
+ p = shell.get(i);
lats[i] = p.lat();
lons[i] = p.lon();
}
+
+ if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ return LatLonPoint.newPolygonQuery(fieldType.name(), new Polygon(lats, lons));
+ }
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
diff --git a/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java b/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java
index 57a189b72f..ab3b23af0f 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java
@@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
@@ -83,7 +84,7 @@ public class GeohashCellQuery {
* @param geohashes optional array of additional geohashes
* @return a new geo bounding box filter
*/
- public static Query create(QueryShardContext context, BaseGeoPointFieldMapper.GeoPointFieldType fieldType,
+ public static Query create(QueryShardContext context, BaseGeoPointFieldMapper.LegacyGeoPointFieldType fieldType,
String geohash, @Nullable List<CharSequence> geohashes) {
MappedFieldType geoHashMapper = fieldType.geoHashFieldType();
if (geoHashMapper == null) {
@@ -241,11 +242,14 @@ public class GeohashCellQuery {
}
}
- if (!(fieldType instanceof BaseGeoPointFieldMapper.GeoPointFieldType)) {
+ if (fieldType instanceof LatLonPointFieldMapper.LatLonPointFieldType) {
+ throw new QueryShardException(context, "failed to parse [{}] query. "
+ + "geo_point field no longer supports geohash_cell queries", NAME);
+ } else if (!(fieldType instanceof BaseGeoPointFieldMapper.LegacyGeoPointFieldType)) {
throw new QueryShardException(context, "failed to parse [{}] query. field [{}] is not a geo_point field", NAME, fieldName);
}
- BaseGeoPointFieldMapper.GeoPointFieldType geoFieldType = ((BaseGeoPointFieldMapper.GeoPointFieldType) fieldType);
+ BaseGeoPointFieldMapper.LegacyGeoPointFieldType geoFieldType = ((BaseGeoPointFieldMapper.LegacyGeoPointFieldType) fieldType);
if (!geoFieldType.isGeoHashPrefixEnabled()) {
throw new QueryShardException(context, "failed to parse [{}] query. [geohash_prefix] is not enabled for field [{}]", NAME,
fieldName);
diff --git a/core/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java
index 736387a0d2..734d4cda92 100644
--- a/core/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java
@@ -132,7 +132,7 @@ public class IndicesQueryBuilder extends AbstractQueryBuilder<IndicesQueryBuilde
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
- builder.field(INDICES_FIELD.getPreferredName(), indices);
+ builder.array(INDICES_FIELD.getPreferredName(), indices);
builder.field(QUERY_FIELD.getPreferredName());
innerQuery.toXContent(builder, params);
builder.field(NO_MATCH_QUERY.getPreferredName());
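
The field(...) to array(...) switch pins down the JSON shape: the indices value is always emitted as a JSON
array. The diff only shows the rename, so treat the motivation as an assumption; the intended output shape can
be sketched with plain string building (XContentBuilder itself handles escaping and many more value types):

    final class ArrayFieldDemo {
        // Emit the field as an explicit JSON array, the shape array(...) guarantees.
        static String jsonArray(String name, String... values) {
            StringBuilder sb = new StringBuilder("\"").append(name).append("\":[");
            for (int i = 0; i < values.length; i++) {
                if (i > 0) sb.append(',');
                sb.append('"').append(values[i]).append('"');
            }
            return sb.append(']').toString();
        }

        public static void main(String[] args) {
            // prints: "indices":["logs-1","logs-2"]
            System.out.println(jsonArray("indices", "logs-1", "logs-2"));
        }
    }
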
diff --git a/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java
index ff783883ff..454c808a5d 100644
--- a/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java
@@ -34,8 +34,8 @@ import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
-import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
@@ -94,7 +94,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
ObjectParser.ValueType.OBJECT_ARRAY);
PARSER.declareField((p, i, c) -> {
try {
- i.setFetchSourceContext(FetchSourceContext.parse(c));
+ i.setFetchSourceContext(FetchSourceContext.parse(c.parser()));
} catch (IOException e) {
throw new ParsingException(p.getTokenLocation(), "Could not parse inner _source definition", e);
}
@@ -137,7 +137,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
private boolean version;
private boolean trackScores;
- private List<String> storedFieldNames;
+ private StoredFieldsContext storedFieldsContext;
private QueryBuilder query = DEFAULT_INNER_HIT_QUERY;
private List<SortBuilder<?>> sorts;
private List<String> docValueFields;
@@ -156,14 +156,14 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
explain = other.explain;
version = other.version;
trackScores = other.trackScores;
- if (other.storedFieldNames != null) {
- storedFieldNames = new ArrayList<>(other.storedFieldNames);
+ if (other.storedFieldsContext != null) {
+ storedFieldsContext = new StoredFieldsContext(other.storedFieldsContext);
}
if (other.docValueFields != null) {
docValueFields = new ArrayList<>(other.docValueFields);
}
if (other.scriptFields != null) {
scriptFields = new HashSet<>(other.scriptFields);
}
if (other.fetchSourceContext != null) {
fetchSourceContext = new FetchSourceContext(
@@ -210,7 +210,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
explain = in.readBoolean();
version = in.readBoolean();
trackScores = in.readBoolean();
- storedFieldNames = (List<String>) in.readGenericValue();
+ storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new);
docValueFields = (List<String>) in.readGenericValue();
if (in.readBoolean()) {
int size = in.readVInt();
@@ -219,7 +219,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
scriptFields.add(new ScriptField(in));
}
}
- fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
+ fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
if (in.readBoolean()) {
int size = in.readVInt();
sorts = new ArrayList<>(size);
@@ -248,17 +248,17 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
out.writeBoolean(explain);
out.writeBoolean(version);
out.writeBoolean(trackScores);
- out.writeGenericValue(storedFieldNames);
+ out.writeOptionalWriteable(storedFieldsContext);
out.writeGenericValue(docValueFields);
boolean hasScriptFields = scriptFields != null;
out.writeBoolean(hasScriptFields);
if (hasScriptFields) {
out.writeVInt(scriptFields.size());
for (ScriptField scriptField : scriptFields) {
- scriptField.writeTo(out);;
+ scriptField.writeTo(out);
}
}
- out.writeOptionalStreamable(fetchSourceContext);
+ out.writeOptionalWriteable(fetchSourceContext);
boolean hasSorts = sorts != null;
out.writeBoolean(hasSorts);
if (hasSorts) {
@@ -343,39 +343,42 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
/**
* Gets the stored fields to load and return.
*
- * @deprecated Use {@link InnerHitBuilder#getStoredFieldNames()} instead.
+ * @deprecated Use {@link InnerHitBuilder#getStoredFieldsContext()} instead.
*/
@Deprecated
public List<String> getFieldNames() {
- return storedFieldNames;
+ return storedFieldsContext == null ? null : storedFieldsContext.fieldNames();
}
/**
- * Sets the stored fields to load and return. If none
- * are specified, the source of the document will be returned.
+ * Sets the stored fields to load and return.
+ * If none are specified, the source of the document will be returned.
*
* @deprecated Use {@link InnerHitBuilder#setStoredFieldNames(List)} instead.
*/
@Deprecated
public InnerHitBuilder setFieldNames(List<String> fieldNames) {
- this.storedFieldNames = fieldNames;
- return this;
+ return setStoredFieldNames(fieldNames);
}
/**
- * Gets the stored fields to load and return.
+ * Gets the stored fields context.
*/
- public List<String> getStoredFieldNames() {
- return storedFieldNames;
+ public StoredFieldsContext getStoredFieldsContext() {
+ return storedFieldsContext;
}
/**
- * Sets the stored fields to load and return. If none
- * are specified, the source of the document will be returned.
+ * Sets the stored fields to load and return.
+ * If none are specified, the source of the document will be returned.
*/
public InnerHitBuilder setStoredFieldNames(List<String> fieldNames) {
- this.storedFieldNames = fieldNames;
+ if (storedFieldsContext == null) {
+ storedFieldsContext = StoredFieldsContext.fromList(fieldNames);
+ } else {
+ storedFieldsContext.addFieldNames(fieldNames);
+ }
return this;
}
@@ -564,22 +567,11 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
innerHitsContext.explain(explain);
innerHitsContext.version(version);
innerHitsContext.trackScores(trackScores);
- if (storedFieldNames != null) {
- if (storedFieldNames.isEmpty()) {
- innerHitsContext.emptyFieldNames();
- } else {
- for (String fieldName : storedFieldNames) {
- innerHitsContext.fieldNames().add(fieldName);
- }
- }
+ if (storedFieldsContext != null) {
+ innerHitsContext.storedFieldsContext(storedFieldsContext);
}
if (docValueFields != null) {
- DocValueFieldsContext docValueFieldsContext = innerHitsContext
- .getFetchSubPhaseContext(DocValueFieldsFetchSubPhase.CONTEXT_FACTORY);
- for (String field : docValueFields) {
- docValueFieldsContext.add(new DocValueFieldsContext.DocValueField(field));
- }
- docValueFieldsContext.setHitExecutionNeeded(true);
+ innerHitsContext.docValueFieldsContext(new DocValueFieldsContext(docValueFields));
}
if (scriptFields != null) {
for (ScriptField field : scriptFields) {
@@ -633,16 +625,8 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
if (fetchSourceContext != null) {
builder.field(SearchSourceBuilder._SOURCE_FIELD.getPreferredName(), fetchSourceContext, params);
}
- if (storedFieldNames != null) {
- if (storedFieldNames.size() == 1) {
- builder.field(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), storedFieldNames.get(0));
- } else {
- builder.startArray(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName());
- for (String fieldName : storedFieldNames) {
- builder.value(fieldName);
- }
- builder.endArray();
- }
+ if (storedFieldsContext != null) {
+ storedFieldsContext.toXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), builder);
}
if (docValueFields != null) {
builder.startArray(SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.getPreferredName());
@@ -693,7 +677,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
Objects.equals(explain, that.explain) &&
Objects.equals(version, that.version) &&
Objects.equals(trackScores, that.trackScores) &&
- Objects.equals(storedFieldNames, that.storedFieldNames) &&
+ Objects.equals(storedFieldsContext, that.storedFieldsContext) &&
Objects.equals(docValueFields, that.docValueFields) &&
Objects.equals(scriptFields, that.scriptFields) &&
Objects.equals(fetchSourceContext, that.fetchSourceContext) &&
@@ -705,7 +689,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
@Override
public int hashCode() {
- return Objects.hash(name, nestedPath, parentChildType, from, size, explain, version, trackScores, storedFieldNames,
+ return Objects.hash(name, nestedPath, parentChildType, from, size, explain, version, trackScores, storedFieldsContext,
docValueFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, query, childInnerHits);
}
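
Note the asymmetry setStoredFieldNames gained above: the first call creates a StoredFieldsContext from the
list, while later calls accumulate names into it rather than replacing it. A reduced sketch of that behavior
(the real StoredFieldsContext carries more state than a bare name list):

    import java.util.ArrayList;
    import java.util.List;

    final class StoredFieldsSketch {
        private List<String> fieldNames;

        // First call creates the list; later calls append instead of replacing.
        StoredFieldsSketch addFieldNames(List<String> names) {
            if (fieldNames == null) {
                fieldNames = new ArrayList<>(names);
            } else {
                fieldNames.addAll(names);
            }
            return this;
        }

        List<String> fieldNames() {
            return fieldNames;
        }
    }
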
diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java
index bff28d0f5b..d5e4d1d2a8 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java
@@ -164,7 +164,7 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder<MatchPhr
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
// validate context specific fields
- if (analyzer != null && context.getAnalysisService().analyzer(analyzer) == null) {
+ if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java
index 6fd6922d9c..4715a5cfa8 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java
@@ -140,7 +140,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder<MatchPhraseQue
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
// validate context specific fields
- if (analyzer != null && context.getAnalysisService().analyzer(analyzer) == null) {
+ if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java
index dc6ac99408..25397d2a3e 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java
@@ -444,7 +444,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
// validate context specific fields
- if (analyzer != null && context.getAnalysisService().analyzer(analyzer) == null) {
+ if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
index 9fb1343b11..7ba39d7b34 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
@@ -147,7 +147,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
*/
public static final class Item implements ToXContent, Writeable {
public static final Item[] EMPTY_ARRAY = new Item[0];
-
+
public interface Field {
ParseField INDEX = new ParseField("_index");
ParseField TYPE = new ParseField("_type");
@@ -780,7 +780,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
if (fields != null) {
- builder.field(Field.FIELDS.getPreferredName(), fields);
+ builder.array(Field.FIELDS.getPreferredName(), fields);
}
buildLikeField(builder, Field.LIKE.getPreferredName(), likeTexts, likeItems);
buildLikeField(builder, Field.UNLIKE.getPreferredName(), unlikeTexts, unlikeItems);
@@ -791,7 +791,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
builder.field(Field.MIN_WORD_LENGTH.getPreferredName(), minWordLength);
builder.field(Field.MAX_WORD_LENGTH.getPreferredName(), maxWordLength);
if (stopWords != null) {
- builder.field(Field.STOP_WORDS.getPreferredName(), stopWords);
+ builder.array(Field.STOP_WORDS.getPreferredName(), stopWords);
}
if (analyzer != null) {
builder.field(Field.ANALYZER.getPreferredName(), analyzer);
@@ -1021,7 +1021,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
}
// set analyzer
- Analyzer analyzerObj = context.getAnalysisService().analyzer(analyzer);
+ Analyzer analyzerObj = context.getIndexAnalyzers().get(analyzer);
if (analyzerObj == null) {
analyzerObj = context.getMapperService().searchAnalyzer();
}
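
The getAnalysisService().analyzer(name) to getIndexAnalyzers().get(name) change recurs across the query
builders in this patch and is a straight registry swap; the more-like-this case above additionally falls back
to the mapper service's search analyzer when the name is unknown. That lookup-with-fallback, as a generic
sketch:

    import java.util.Map;

    final class RegistrySketch {
        // Resolve a named entry, falling back to a default when the name is
        // null or missing, as the analyzer resolution above does.
        static <T> T resolve(Map<String, T> registry, String name, T fallback) {
            T found = (name == null) ? null : registry.get(name);
            return found != null ? found : fallback;
        }
    }
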
diff --git a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
index 6ffcb19ea3..f45009f746 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
@@ -708,7 +708,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
protected Query doToQuery(QueryShardContext context) throws IOException {
MultiMatchQuery multiMatchQuery = new MultiMatchQuery(context);
if (analyzer != null) {
- if (context.getAnalysisService().analyzer(analyzer) == null) {
+ if (context.getIndexAnalyzers().get(analyzer) == null) {
throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found");
}
multiMatchQuery.setAnalyzer(analyzer);
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java
index 95fe0094ba..4fcecdf9f2 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java
@@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.script.Script;
import java.io.IOException;
import java.util.Objects;
@@ -42,11 +43,18 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
private final XContentParser parser;
private final IndicesQueriesRegistry indicesQueriesRegistry;
private final ParseFieldMatcher parseFieldMatcher;
+ private final String defaultScriptLanguage;
public QueryParseContext(IndicesQueriesRegistry registry, XContentParser parser, ParseFieldMatcher parseFieldMatcher) {
+ this(Script.DEFAULT_SCRIPT_LANG, registry, parser, parseFieldMatcher);
+ }
+
+ public QueryParseContext(String defaultScriptLanguage, IndicesQueriesRegistry registry, XContentParser parser,
+ ParseFieldMatcher parseFieldMatcher) {
this.indicesQueriesRegistry = Objects.requireNonNull(registry, "indices queries registry cannot be null");
this.parser = Objects.requireNonNull(parser, "parser cannot be null");
this.parseFieldMatcher = Objects.requireNonNull(parseFieldMatcher, "parse field matcher cannot be null");
+ this.defaultScriptLanguage = defaultScriptLanguage;
}
public XContentParser parser() {
@@ -85,16 +93,12 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
* Parses a query excluding the query element that wraps it
*/
public Optional<QueryBuilder> parseInnerQueryBuilder() throws IOException {
- // move to START object
- XContentParser.Token token;
if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
- token = parser.nextToken();
- if (token != XContentParser.Token.START_OBJECT) {
+ if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, must start with start_object");
}
}
- token = parser.nextToken();
- if (token == XContentParser.Token.END_OBJECT) {
+ if (parser.nextToken() == XContentParser.Token.END_OBJECT) {
// we encountered '{}' for a query clause
String msg = "query malformed, empty clause found at [" + parser.getTokenLocation() +"]";
DEPRECATION_LOGGER.deprecated(msg);
@@ -103,23 +107,27 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
}
return Optional.empty();
}
- if (token != XContentParser.Token.FIELD_NAME) {
+ if (parser.currentToken() != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, no field after start_object");
}
String queryName = parser.currentName();
// move to the next START_OBJECT
- token = parser.nextToken();
- if (token != XContentParser.Token.START_OBJECT) {
+ if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[" + queryName + "] query malformed, no start_object after query name");
}
@SuppressWarnings("unchecked")
Optional<QueryBuilder> result = (Optional<QueryBuilder>) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher,
parser.getTokenLocation()).fromXContent(this);
+ // end_object of the specific query (e.g. match, multi_match etc.) element
if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
throw new ParsingException(parser.getTokenLocation(),
"[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]");
}
- parser.nextToken();
+ // end_object of the query object
+ if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ throw new ParsingException(parser.getTokenLocation(),
+ "[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]");
+ }
return result;
}
@@ -127,4 +135,12 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
public ParseFieldMatcher getParseFieldMatcher() {
return parseFieldMatcher;
}
+
+ /**
+ * Returns the default scripting language that should be used when a script does not specify its language
+ * explicitly.
+ */
+ public String getDefaultScriptLanguage() {
+ return defaultScriptLanguage;
+ }
}
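
The tightened parseInnerQueryBuilder no longer blindly advances past the query: it now also checks that the
token after the query body is the wrapper's own END_OBJECT, so trailing garbage in {"match": { ... }} fails
loudly. A toy token walk showing the two closing objects the parser must see (illustrative token model, not
the real XContentParser API):

    import java.util.Arrays;
    import java.util.Iterator;

    final class EnvelopeSketch {
        enum Tok { START_OBJECT, FIELD_NAME, END_OBJECT }

        static void expect(Iterator<Tok> it, Tok wanted) {
            Tok got = it.hasNext() ? it.next() : null;
            if (got != wanted) {
                throw new IllegalStateException("expected [" + wanted + "] but found [" + got + "]");
            }
        }

        public static void main(String[] args) {
            // {"match": {}} tokenizes as the five tokens below
            Iterator<Tok> it = Arrays.asList(Tok.START_OBJECT, Tok.FIELD_NAME,
                    Tok.START_OBJECT, Tok.END_OBJECT, Tok.END_OBJECT).iterator();
            expect(it, Tok.START_OBJECT); // wrapper opens
            expect(it, Tok.FIELD_NAME);   // query name, e.g. "match"
            expect(it, Tok.START_OBJECT); // query body opens
            expect(it, Tok.END_OBJECT);   // query body closes (already checked before)
            expect(it, Tok.END_OBJECT);   // wrapper closes (the newly added check)
        }
    }
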
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java
index d8ec9ef2a4..f12605088e 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java
@@ -28,6 +28,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptSettings;
/**
* Context object used to rewrite {@link QueryBuilder} instances into simplified version.
@@ -101,9 +102,18 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier {
/**
* Returns a new {@link QueryParseContext} that wraps the provided parser, using the ParseFieldMatcher settings that
- * are configured in the index settings
+ * are configured in the index settings. The default script language is always Painless.
*/
public QueryParseContext newParseContext(XContentParser parser) {
return new QueryParseContext(indicesQueriesRegistry, parser, indexSettings.getParseFieldMatcher());
}
+
+ /**
+ * Returns a new {@link QueryParseContext} like {@link #newParseContext(XContentParser)}, with the only difference that
+ * the default script language defaults to what has been set in the 'script.legacy.default_lang' setting.
+ */
+ public QueryParseContext newParseContextWithLegacyScriptLanguage(XContentParser parser) {
+ String defaultScriptLanguage = ScriptSettings.getLegacyDefaultLang(indexSettings.getNodeSettings());
+ return new QueryParseContext(defaultScriptLanguage, indicesQueriesRegistry, parser, indexSettings.getParseFieldMatcher());
+ }
}
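
The two-constructor shape added to QueryParseContext keeps existing call sites compiling while letting
newParseContextWithLegacyScriptLanguage thread a different default through. The delegate-with-default pattern,
reduced to a sketch ("painless" stands in for Script.DEFAULT_SCRIPT_LANG and is an assumption here):

    final class ParseContextSketch {
        static final String DEFAULT_LANG = "painless"; // assumed default, see above

        final String defaultScriptLanguage;

        ParseContextSketch() {
            this(DEFAULT_LANG); // the old signature delegates to the new one
        }

        ParseContextSketch(String defaultScriptLanguage) {
            this.defaultScriptLanguage = defaultScriptLanguage;
        }
    }
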
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
index 78869f5374..377afca2f6 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
@@ -41,7 +41,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
@@ -116,8 +116,8 @@ public class QueryShardContext extends QueryRewriteContext {
this.isFilter = false;
}
- public AnalysisService getAnalysisService() {
- return mapperService.analysisService();
+ public IndexAnalyzers getIndexAnalyzers() {
+ return mapperService.getIndexAnalyzers();
}
public Similarity getSearchSimilarity() {
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
index be6a170cc2..807343237d 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
@@ -868,14 +868,14 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
if (analyzer == null) {
qpSettings.defaultAnalyzer(context.getMapperService().searchAnalyzer());
} else {
- NamedAnalyzer namedAnalyzer = context.getAnalysisService().analyzer(analyzer);
+ NamedAnalyzer namedAnalyzer = context.getIndexAnalyzers().get(analyzer);
if (namedAnalyzer == null) {
throw new QueryShardException(context, "[query_string] analyzer [" + analyzer + "] not found");
}
qpSettings.forceAnalyzer(namedAnalyzer);
}
if (quoteAnalyzer != null) {
- NamedAnalyzer namedAnalyzer = context.getAnalysisService().analyzer(quoteAnalyzer);
+ NamedAnalyzer namedAnalyzer = context.getIndexAnalyzers().get(quoteAnalyzer);
if (namedAnalyzer == null) {
throw new QueryShardException(context, "[query_string] quote_analyzer [" + quoteAnalyzer + "] not found");
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java
index e6e902e68f..3ff924b28d 100644
--- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java
@@ -106,7 +106,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, parseContext.getParseFieldMatcher());
+ script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]");
}
@@ -116,7 +116,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, parseContext.getParseFieldMatcher());
+ script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
index f408c0f147..fbe5964f2a 100644
--- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
@@ -355,7 +355,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
if (analyzer == null) {
luceneAnalyzer = context.getMapperService().searchAnalyzer();
} else {
- luceneAnalyzer = context.getAnalysisService().analyzer(analyzer);
+ luceneAnalyzer = context.getIndexAnalyzers().get(analyzer);
if (luceneAnalyzer == null) {
throw new QueryShardException(context, "[" + SimpleQueryStringBuilder.NAME + "] analyzer [" + analyzer
+ "] not found");
diff --git a/core/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
index c23597dbf1..a279812df1 100644
--- a/core/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
@@ -48,7 +48,6 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
public static boolean DEFAULT_IN_ORDER = true;
private static final ParseField SLOP_FIELD = new ParseField("slop");
- private static final ParseField COLLECT_PAYLOADS_FIELD = new ParseField("collect_payloads").withAllDeprecated("no longer supported");
private static final ParseField CLAUSES_FIELD = new ParseField("clauses");
private static final ParseField IN_ORDER_FIELD = new ParseField("in_order");
@@ -175,8 +174,6 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, IN_ORDER_FIELD)) {
inOrder = parser.booleanValue();
- } else if (parseContext.getParseFieldMatcher().match(currentFieldName, COLLECT_PAYLOADS_FIELD)) {
- // Deprecated in 3.0.0
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {
slop = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java
index 4cbf71f294..e2fbc5955d 100644
--- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java
@@ -115,7 +115,7 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder<ScriptScore
currentFieldName = parser.currentName();
} else {
if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, parseContext.getParseFieldMatcher());
+ script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(), NAME + " query does not support [" + currentFieldName + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java
index 835ec8e143..f2161013de 100644
--- a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java
+++ b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java
@@ -204,7 +204,7 @@ public class MatchQuery {
}
return context.getMapperService().searchAnalyzer();
} else {
- Analyzer analyzer = context.getMapperService().analysisService().analyzer(this.analyzer);
+ Analyzer analyzer = context.getMapperService().getIndexAnalyzers().get(this.analyzer);
if (analyzer == null) {
throw new IllegalArgumentException("No analyzer found for [" + this.analyzer + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java
index ebef702dfd..823f882f40 100644
--- a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java
+++ b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java
@@ -35,13 +35,16 @@ import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
+import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
+import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
import java.io.IOException;
/**
- *
+ * Queries geo_point fields by distance ranges. Only used for indexes created before version 2.2.
+ * @deprecated only used for indexes created before version 2.2
*/
+@Deprecated
public class GeoDistanceRangeQuery extends Query {
private final double lat;
@@ -58,7 +61,8 @@ public class GeoDistanceRangeQuery extends Query {
private final IndexGeoPointFieldData indexFieldData;
public GeoDistanceRangeQuery(GeoPoint point, Double lowerVal, Double upperVal, boolean includeLower,
- boolean includeUpper, GeoDistance geoDistance, GeoPointFieldMapperLegacy.GeoPointFieldType fieldType,
+ boolean includeUpper, GeoDistance geoDistance,
+ LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType,
IndexGeoPointFieldData indexFieldData, String optimizeBbox) {
this.lat = point.lat();
this.lon = point.lon();
@@ -91,7 +95,8 @@ public class GeoDistanceRangeQuery extends Query {
if ("memory".equals(optimizeBbox)) {
boundingBoxFilter = null;
} else if ("indexed".equals(optimizeBbox)) {
- boundingBoxFilter = IndexedGeoBoundingBoxQuery.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), fieldType);
+ boundingBoxFilter = LegacyIndexedGeoBoundingBoxQuery.create(distanceBoundingCheck.topLeft(),
+ distanceBoundingCheck.bottomRight(), fieldType);
distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter
} else {
throw new IllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported");
@@ -207,7 +212,8 @@ public class GeoDistanceRangeQuery extends Query {
@Override
public String toString(String field) {
- return "GeoDistanceRangeQuery(" + indexFieldData.getFieldName() + ", " + geoDistance + ", [" + inclusiveLowerPoint + " - " + inclusiveUpperPoint + "], " + lat + ", " + lon + ")";
+ return "GeoDistanceRangeQuery(" + indexFieldData.getFieldName() + ", " + geoDistance + ", ["
+ + inclusiveLowerPoint + " - " + inclusiveUpperPoint + "], " + lat + ", " + lon + ")";
}
@Override
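
For orientation, the constructor above dispatches on the remaining optimize_bbox modes; condensed, with
placeholder strings instead of the real field-data check and indexed filter:

    final class BboxModeSketch {
        // Mirrors the visible dispatch: "memory" keeps the per-document check,
        // "indexed" swaps in an indexed range filter, anything else is rejected.
        static String plan(String optimizeBbox) {
            switch (optimizeBbox) {
                case "memory":  return "per-document bounding-box check from field data";
                case "indexed": return "indexed lat/lon range filter pre-pass";
                default:
                    throw new IllegalArgumentException(
                        "type [" + optimizeBbox + "] for bounding box optimization not supported");
            }
        }
    }
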
diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/LegacyInMemoryGeoBoundingBoxQuery.java
index 789ee25e1b..2d8ea7af49 100644
--- a/core/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxQuery.java
+++ b/core/src/main/java/org/elasticsearch/index/search/geo/LegacyInMemoryGeoBoundingBoxQuery.java
@@ -34,15 +34,17 @@ import java.util.Objects;
/**
*
+ * @deprecated This query is no longer used for geo_point indexes created after version 2.1
*/
-public class InMemoryGeoBoundingBoxQuery extends Query {
+@Deprecated
+public class LegacyInMemoryGeoBoundingBoxQuery extends Query {
private final GeoPoint topLeft;
private final GeoPoint bottomRight;
private final IndexGeoPointFieldData indexFieldData;
- public InMemoryGeoBoundingBoxQuery(GeoPoint topLeft, GeoPoint bottomRight, IndexGeoPointFieldData indexFieldData) {
+ public LegacyInMemoryGeoBoundingBoxQuery(GeoPoint topLeft, GeoPoint bottomRight, IndexGeoPointFieldData indexFieldData) {
this.topLeft = topLeft;
this.bottomRight = bottomRight;
this.indexFieldData = indexFieldData;
@@ -87,7 +89,7 @@ public class InMemoryGeoBoundingBoxQuery extends Query {
if (sameClassAs(obj) == false) {
return false;
}
- InMemoryGeoBoundingBoxQuery other = (InMemoryGeoBoundingBoxQuery) obj;
+ LegacyInMemoryGeoBoundingBoxQuery other = (LegacyInMemoryGeoBoundingBoxQuery) obj;
return fieldName().equalsIgnoreCase(other.fieldName())
&& topLeft.equals(other.topLeft)
&& bottomRight.equals(other.bottomRight);
diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/LegacyIndexedGeoBoundingBoxQuery.java
index 5831f014c9..6fdb2a906c 100644
--- a/core/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxQuery.java
+++ b/core/src/main/java/org/elasticsearch/index/search/geo/LegacyIndexedGeoBoundingBoxQuery.java
@@ -24,15 +24,20 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
+import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
/**
+ *
+ * @deprecated This query is no longer used for geo_point indexes created after version 2.1
*/
-public class IndexedGeoBoundingBoxQuery {
+@Deprecated
+public class LegacyIndexedGeoBoundingBoxQuery {
- public static Query create(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapperLegacy.GeoPointFieldType fieldType) {
+ public static Query create(GeoPoint topLeft, GeoPoint bottomRight,
+ LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) {
if (!fieldType.isLatLonEnabled()) {
- throw new IllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldType.name() + "], can't use indexed filter on it");
+ throw new IllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldType.name()
+ + "], can't use indexed filter on it");
}
//checks to see if bounding box crosses 180 degrees
if (topLeft.lon() > bottomRight.lon()) {
@@ -42,7 +47,8 @@ public class IndexedGeoBoundingBoxQuery {
}
}
- private static Query westGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapperLegacy.GeoPointFieldType fieldType) {
+ private static Query westGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight,
+ LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) {
BooleanQuery.Builder filter = new BooleanQuery.Builder();
filter.setMinimumNumberShouldMatch(1);
filter.add(fieldType.lonFieldType().rangeQuery(null, bottomRight.lon(), true, true), Occur.SHOULD);
@@ -51,7 +57,8 @@ public class IndexedGeoBoundingBoxQuery {
return new ConstantScoreQuery(filter.build());
}
- private static Query eastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapperLegacy.GeoPointFieldType fieldType) {
+ private static Query eastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight,
+ LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) {
BooleanQuery.Builder filter = new BooleanQuery.Builder();
filter.add(fieldType.lonFieldType().rangeQuery(topLeft.lon(), bottomRight.lon(), true, true), Occur.MUST);
filter.add(fieldType.latFieldType().rangeQuery(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST);
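
The topLeft.lon() > bottomRight.lon() test that routes between the west and east filters is the antimeridian
case: a box that wraps across 180 degrees is answered as two longitude ranges joined by SHOULD clauses with
minimumNumberShouldMatch(1). The same membership test as a scalar predicate:

    final class DatelineSketch {
        // True when lon falls inside [left, right], treating left > right as a
        // box that wraps across the dateline, matching the west/east split above.
        static boolean lonInBox(double lon, double left, double right) {
            return left <= right
                ? lon >= left && lon <= right   // ordinary box
                : lon >= left || lon <= right;  // box crossing the dateline
        }
    }
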
diff --git a/core/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/core/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
index c8719a610e..0e46a56248 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
@@ -19,17 +19,14 @@
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.IndexSettings;
-/**
- *
- */
public abstract class AbstractIndexShardComponent implements IndexShardComponent {
- protected final ESLogger logger;
+ protected final Logger logger;
protected final DeprecationLogger deprecationLogger;
protected final ShardId shardId;
protected final IndexSettings indexSettings;
diff --git a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java
index 3cc4ea1152..a98384cee1 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java
@@ -19,13 +19,13 @@
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergeTrigger;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.elasticsearch.Version;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@@ -46,7 +46,7 @@ import java.util.Map;
*/
public final class ElasticsearchMergePolicy extends MergePolicy {
- private static ESLogger logger = Loggers.getLogger(ElasticsearchMergePolicy.class);
+ private static Logger logger = Loggers.getLogger(ElasticsearchMergePolicy.class);
private final MergePolicy delegate;
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 1dff3ad8b9..1eb21d55e2 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.CorruptIndexException;
@@ -41,16 +42,18 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
@@ -60,6 +63,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.AsyncIOProcessor;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNotFoundException;
@@ -111,9 +115,9 @@ import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.search.suggest.completion.CompletionFieldStats;
@@ -332,8 +336,21 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public void updatePrimaryTerm(final long newTerm) {
synchronized (mutex) {
if (newTerm != primaryTerm) {
- assert shardRouting.primary() == false : "a primary shard should never update it's term. shard: " + shardRouting
- + " current term [" + primaryTerm + "] new term [" + newTerm + "]";
+ // Note that due to cluster state batching, an initializing primary shard can fail and be re-assigned within
+ // the same cluster state, causing its term to be incremented. Note that if both the current and the new shard
+ // state are initializing, we could replace the current shard and reinitialize it. It is, however, possible
+ // that this shard is being started. This can happen if:
+ // 1) Shard is post recovery and sends shard started to the master
+ // 2) Node gets disconnected and rejoins
+ // 3) Master assigns the shard back to the node
+ // 4) Master processes the shard started and starts the shard
+ // 5) The node processes the cluster state where the shard is both started and its primary term is incremented.
+ //
+ // We could fail the shard in that case, but this would cause it to be removed from the in-sync allocations list,
+ // potentially preventing re-allocation.
+ assert shardRouting.primary() == false || shardRouting.initializing() == false :
+     "a started primary shard should never update its term. shard: " + shardRouting
+ + " current term [" + primaryTerm + "] new term [" + newTerm + "]";
assert newTerm > primaryTerm : "primary terms can only go up. current [" + primaryTerm + "], new [" + newTerm + "]";
primaryTerm = newTerm;
}
@@ -362,60 +379,46 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* @throws IOException if shard state could not be persisted
*/
public void updateRoutingEntry(final ShardRouting newRouting) throws IOException {
- final ShardRouting currentRouting = this.shardRouting;
- if (!newRouting.shardId().equals(shardId())) {
- throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId() + "");
- }
- if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
- throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
- }
- if (currentRouting != null) {
- if (!newRouting.primary() && currentRouting.primary()) {
- logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
+ final ShardRouting currentRouting;
+ synchronized (mutex) {
+ currentRouting = this.shardRouting;
+
+ if (!newRouting.shardId().equals(shardId())) {
+ throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId() + "");
}
- // if its the same routing, return
- if (currentRouting.equals(newRouting)) {
- return;
+ if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
+ throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
+ }
+ if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) {
+ throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. Current "
+ + currentRouting + ", new " + newRouting);
}
- }
- if (state == IndexShardState.POST_RECOVERY) {
- // if the state is started or relocating (cause it might move right away from started to relocating)
- // then move to STARTED
- if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
+ if (state == IndexShardState.POST_RECOVERY && newRouting.active()) {
+ assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting;
// we want to refresh *before* we move to internal STARTED state
try {
getEngine().refresh("cluster_state_started");
} catch (Exception e) {
logger.debug("failed to refresh due to move to cluster wide started", e);
}
-
- boolean movedToStarted = false;
- synchronized (mutex) {
- // do the check under a mutex, so we make sure to only change to STARTED if in POST_RECOVERY
- if (state == IndexShardState.POST_RECOVERY) {
- changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
- movedToStarted = true;
- } else {
- logger.debug("state [{}] not changed, not in POST_RECOVERY, global state is [{}]", state, newRouting.state());
- }
- }
- if (movedToStarted) {
- indexEventListener.afterIndexShardStarted(this);
- }
+ changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
+ } else if (state == IndexShardState.RELOCATED &&
+ (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
+ // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
+ // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
+ // active primaries.
+ throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
}
+ this.shardRouting = newRouting;
+ persistMetadata(newRouting, currentRouting);
}
-
- if (state == IndexShardState.RELOCATED &&
- (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
- // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
- // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
- // active primaries.
- throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
+ if (currentRouting != null && currentRouting.active() == false && newRouting.active()) {
+ indexEventListener.afterIndexShardStarted(this);
+ }
+ if (newRouting.equals(currentRouting) == false) {
+ indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
}
- this.shardRouting = newRouting;
- indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
- persistMetadata(newRouting, currentRouting);
}
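The reshaped updateRoutingEntry follows a lock-then-notify pattern: validate and swap the routing under the mutex, persist while still holding it, and only invoke listeners after the lock is released. A condensed sketch of that shape (generic stand-in types, not the ES API):

import java.util.function.BiConsumer;

final class RoutingHolder<T> {
    private final Object mutex = new Object();
    private T current;
    private final BiConsumer<T, T> changedListener;

    RoutingHolder(BiConsumer<T, T> changedListener) {
        this.changedListener = changedListener;
    }

    void update(T newValue) {
        final T previous;
        synchronized (mutex) {
            previous = current;
            // validation and persistence happen here, inside the lock
            current = newValue;
        }
        // notifications run outside the mutex to keep the critical section short
        if (newValue.equals(previous) == false) {
            changedListener.accept(previous, newValue);
        }
    }
}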
/**
@@ -445,6 +448,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
public void relocated(String reason) throws IllegalIndexShardStateException, InterruptedException {
+ assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting;
try {
indexShardOperationsLock.blockOperations(30, TimeUnit.MINUTES, () -> {
// no shard operation locks are being held here, move state from started to relocated
@@ -454,6 +458,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
+ // if the master cancelled the recovery, the target will be removed
+ // and the recovery will be stopped.
+ // However, it is still possible that we concurrently end up here
+ // and therefore have to ensure we don't mark the shard as relocated when
+ // its shard routing says otherwise.
+ if (shardRouting.relocating() == false) {
+ throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED,
+ ": shard is no longer relocating " + shardRouting);
+ }
changeState(IndexShardState.RELOCATED, reason);
}
});
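The hand-off above drains all in-flight operations and then re-checks the routing before flipping state, because the master may have cancelled the relocation concurrently. A sketch of that pattern with a plain semaphore (names are illustrative; the ES class uses its own operations lock):

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class RelocationHandoff {
    private static final int PERMITS = Integer.MAX_VALUE;
    private final Semaphore operations = new Semaphore(PERMITS);
    private volatile String state = "STARTED";

    void relocated(BooleanSupplier stillRelocating) throws InterruptedException {
        // block new operations and wait for in-flight ones to drain
        if (operations.tryAcquire(PERMITS, 30, TimeUnit.MINUTES) == false) {
            throw new IllegalStateException("timed out waiting for operations to drain");
        }
        try {
            // re-check under the "lock": the master may have cancelled the relocation
            if (stillRelocating.getAsBoolean() == false) {
                throw new IllegalStateException("shard is no longer relocating");
            }
            state = "RELOCATED";
        } finally {
            operations.release(PERMITS);
        }
    }
}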
@@ -486,27 +499,32 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
return previousState;
}
- public Engine.Index prepareIndexOnPrimary(SourceToParse source, long version, VersionType versionType) {
+ public Engine.Index prepareIndexOnPrimary(SourceToParse source, long version, VersionType versionType, long autoGeneratedIdTimestamp,
+ boolean isRetry) {
try {
verifyPrimary();
- return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.PRIMARY);
+ return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.PRIMARY,
+ autoGeneratedIdTimestamp, isRetry);
} catch (Exception e) {
verifyNotClosed(e);
throw e;
}
}
- public Engine.Index prepareIndexOnReplica(SourceToParse source, long version, VersionType versionType) {
+ public Engine.Index prepareIndexOnReplica(SourceToParse source, long version, VersionType versionType, long autoGeneratedIdTimestamp,
+ boolean isRetry) {
try {
verifyReplicationTarget();
- return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.REPLICA);
+ return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.REPLICA, autoGeneratedIdTimestamp,
+ isRetry);
} catch (Exception e) {
verifyNotClosed(e);
throw e;
}
}
- static Engine.Index prepareIndex(DocumentMapperForType docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin) {
+ static Engine.Index prepareIndex(DocumentMapperForType docMapper, SourceToParse source, long version, VersionType versionType,
+ Engine.Operation.Origin origin, long autoGeneratedIdTimestamp, boolean isRetry) {
long startTime = System.nanoTime();
ParsedDocument doc = docMapper.getDocumentMapper().parse(source);
if (docMapper.getMapping() != null) {
@@ -515,37 +533,29 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
MappedFieldType uidFieldType = docMapper.getDocumentMapper().uidMapper().fieldType();
Query uidQuery = uidFieldType.termQuery(doc.uid(), null);
Term uid = MappedFieldType.extractTerm(uidQuery);
- return new Engine.Index(uid, doc, version, versionType, origin, startTime);
+ return new Engine.Index(uid, doc, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry);
}
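The two new parameters exist so the engine can decide whether an append-only optimization is safe: a document whose ID was auto-generated, that is not a retry, and that is newer than the max unsafe timestamp has provably never been indexed before. A hedged sketch of that predicate (IndexOp and UNSET_TIMESTAMP are illustrative names, not the ES constants):

final class IndexOp {
    static final long UNSET_TIMESTAMP = -1L;
    final long autoGeneratedIdTimestamp;
    final boolean isRetry;

    IndexOp(long autoGeneratedIdTimestamp, boolean isRetry) {
        this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
        this.isRetry = isRetry;
    }

    // the engine can skip the version lookup only for a non-retried,
    // auto-generated ID that is newer than the max unsafe timestamp
    boolean canOptimizeAppendOnly(long maxUnsafeAutoIdTimestamp) {
        return isRetry == false
            && autoGeneratedIdTimestamp != UNSET_TIMESTAMP
            && autoGeneratedIdTimestamp > maxUnsafeAutoIdTimestamp;
    }
}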
- /**
- * Index a document and return whether it was created, as opposed to just
- * updated.
- */
- public boolean index(Engine.Index index) {
+ public void index(Engine.Index index) {
ensureWriteAllowed(index);
Engine engine = getEngine();
- return index(engine, index);
+ index(engine, index);
}
- private boolean index(Engine engine, Engine.Index index) {
+ private void index(Engine engine, Engine.Index index) {
active.set(true);
index = indexingOperationListeners.preIndex(index);
- final boolean created;
try {
if (logger.isTraceEnabled()) {
logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs());
}
- created = engine.index(index);
+ engine.index(index);
index.endTime(System.nanoTime());
} catch (Exception e) {
indexingOperationListeners.postIndex(index, e);
throw e;
}
-
- indexingOperationListeners.postIndex(index, created);
-
- return created;
+ indexingOperationListeners.postIndex(index, index.isCreated());
}
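The rewritten index path keeps the listener contract intact even though the boolean return is gone: preIndex may rewrite the operation, and postIndex fires exactly once, on either the success or the failure path. A minimal generic sketch of that bracket (stand-in interfaces, not the ES listener types):

import java.util.function.Function;

final class ListenerBracket {
    interface Listener<T> {
        T preIndex(T op);
        void postIndex(T op, boolean created);
        void postIndex(T op, Exception failure);
    }

    static <T> void run(Listener<T> listener, T op, Function<T, Boolean> engine) throws Exception {
        op = listener.preIndex(op);      // the listener may replace the operation
        final boolean created;
        try {
            created = engine.apply(op);  // the actual engine call, may throw
        } catch (Exception e) {
            listener.postIndex(op, e);   // failure-path notification
            throw e;
        }
        listener.postIndex(op, created); // success-path notification
    }
}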
public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) {
@@ -733,7 +743,12 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
verifyStartedOrRecovering();
logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId);
- return getEngine().syncFlush(syncId, expectedCommitId);
+ Engine engine = getEngine();
+ if (engine.isRecovering()) {
+ throw new IllegalIndexShardStateException(shardId(), state, "syncFlush is only allowed if the engine is not recovering" +
+ " from the translog");
+ }
+ return engine.syncFlush(syncId, expectedCommitId);
}
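The same guard is applied to flush() further down: a commit while the engine is still replaying the translog could persist a half-recovered state. A tiny sketch of the check (the Engine interface here is a stand-in):

final class FlushGuard {
    interface Engine {
        boolean isRecovering();
        void flush();
    }

    static void flushIfAllowed(Engine engine) {
        if (engine.isRecovering()) {
            // reject commits until translog replay has finished
            throw new IllegalStateException("flush is only allowed if the engine is not recovering from the translog");
        }
        engine.flush();
    }
}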
public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException {
@@ -744,11 +759,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
// we allow flush while recovering, since we allow for operations to happen
// while recovering, and we want to keep the translog at bay (up to deletes, which
- // we don't gc).
+ // we don't gc). Yet, we don't use flush internally to clear deletes and flush the IndexWriter since
+ // we use #writeIndexingBuffer for this now.
verifyStartedOrRecovering();
-
+ Engine engine = getEngine();
+ if (engine.isRecovering()) {
+ throw new IllegalIndexShardStateException(shardId(), state, "flush is only allowed if the engine is not recovering" +
+ " from the translog");
+ }
long time = System.nanoTime();
- Engine.CommitId commitId = getEngine().flush(force, waitIfOngoing);
+ Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
flushMetric.inc(System.nanoTime() - time);
return commitId;
@@ -957,11 +977,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
translogStats.totalOperations(0);
translogStats.totalOperationsOnStart(0);
}
- internalPerformTranslogRecovery(false, indexExists);
+ internalPerformTranslogRecovery(false, indexExists, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage();
}
- private void internalPerformTranslogRecovery(boolean skipTranslogRecovery, boolean indexExists) throws IOException {
+ private void internalPerformTranslogRecovery(boolean skipTranslogRecovery, boolean indexExists, long maxUnsafeAutoIdTimestamp) throws IOException {
if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state);
}
@@ -990,7 +1010,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
} else {
openMode = EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG;
}
- final EngineConfig config = newEngineConfig(openMode);
+ final EngineConfig config = newEngineConfig(openMode, maxUnsafeAutoIdTimestamp);
// we disable deletes since we allow for operations to be executed against the shard while recovering
// but we need to make sure we don't lose deletes until we are done recovering
config.setEnableGcDeletes(false);
@@ -1002,7 +1022,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
active.set(true);
newEngine.recoverFromTranslog();
}
+ }
+ protected void onNewEngine(Engine newEngine) {
+ refreshListeners.setTranslog(newEngine.getTranslog());
}
/**
@@ -1010,9 +1033,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* the replay of the transaction log which is required in cases where we restore a previous index or recover from
* a remote peer.
*/
- public void skipTranslogRecovery() throws IOException {
+ public void skipTranslogRecovery(long maxUnsafeAutoIdTimestamp) throws IOException {
assert getEngineOrNull() == null : "engine was already created";
- internalPerformTranslogRecovery(true, true);
+ internalPerformTranslogRecovery(true, true, maxUnsafeAutoIdTimestamp);
assert recoveryState.getTranslog().recoveredOperations() == 0;
}
@@ -1165,7 +1188,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
boolean wasActive = active.getAndSet(false);
if (wasActive) {
logger.debug("shard is now inactive");
- indexEventListener.onShardInactive(this);
+ try {
+ indexEventListener.onShardInactive(this);
+ } catch (Exception e) {
+ logger.warn("failed to notify index event listener", e);
+ }
}
}
}
@@ -1179,6 +1206,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
public boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer, List<IndexShard> localShards) throws IOException {
+ assert shardRouting.primary() : "recover from local shards only makes sense if the shard is a primary shard";
+ assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " + recoveryState.getRecoverySource();
final List<LocalShardSnapshot> snapshots = new ArrayList<>();
try {
for (IndexShard shard : localShards) {
@@ -1199,14 +1228,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
// we are the first primary, recover from the gateway
// if its post api allocation, the index should exists
assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
- boolean shouldExist = shardRouting.allocatedPostIndexCreate(indexSettings.getIndexMetaData());
-
+ assert shardRouting.initializing() : "can only start recovery on initializing shard";
StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
- return storeRecovery.recoverFromStore(this, shouldExist);
+ return storeRecovery.recoverFromStore(this);
}
public boolean restoreFromRepository(Repository repository) {
assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
+ assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + recoveryState.getRecoverySource();
StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
return storeRecovery.recoverFromRepository(this, repository);
}
@@ -1220,7 +1249,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
if (engine != null) {
try {
Translog translog = engine.getTranslog();
- return translog.sizeInBytes() > indexSettings.getFlushThresholdSize().bytes();
+ return translog.sizeInBytes() > indexSettings.getFlushThresholdSize().getBytes();
} catch (AlreadyClosedException | EngineClosedException ex) {
// that's fine we are already close - no need to flush
}
@@ -1422,26 +1451,57 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
return this.currentEngineReference.get();
}
- public void startRecovery(RecoveryState recoveryState, RecoveryTargetService recoveryTargetService,
- RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService,
+ PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
BiConsumer<String, MappingMetaData> mappingUpdateConsumer,
IndicesService indicesService) {
- switch (recoveryState.getType()) {
- case PRIMARY_RELOCATION:
- case REPLICA:
+ // TODO: Create a proper object to encapsulate the recovery context
+ // all of the current methods here follow a pattern of:
+ // resolve context which isn't really dependent on the local shards and then async
+ // call some external method with this pointer.
+ // with a proper recovery context object we can simply change this to:
+ // startRecovery(RecoveryState recoveryState, ShardRecoverySource source ) {
+ // markAsRecovery("from " + source.getShortDescription(), recoveryState);
+ // threadPool.generic().execute() {
+ // onFailure () { listener.failure() };
+ // doRun() {
+ // if (source.recover(this)) {
+ // recoveryListener.onRecoveryDone(recoveryState);
+ // }
+ // }
+ // }}
+ // }
+ assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource());
+ switch (recoveryState.getRecoverySource().getType()) {
+ case EMPTY_STORE:
+ case EXISTING_STORE:
+ markAsRecovering("from store", recoveryState); // mark the shard as recovering on the cluster state thread
+ threadPool.generic().execute(() -> {
+ try {
+ if (recoverFromStore()) {
+ recoveryListener.onRecoveryDone(recoveryState);
+ }
+ } catch (Exception e) {
+ recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
+ }
+ });
+ break;
+ case PEER:
try {
markAsRecovering("from " + recoveryState.getSourceNode(), recoveryState);
- recoveryTargetService.startRecovery(this, recoveryState.getType(), recoveryState.getSourceNode(), recoveryListener);
+ recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener);
} catch (Exception e) {
failShard("corrupted preexisting index", e);
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
}
break;
- case STORE:
- markAsRecovering("from store", recoveryState); // mark the shard as recovering on the cluster state thread
+ case SNAPSHOT:
+ markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread
+ SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) recoveryState.getRecoverySource();
threadPool.generic().execute(() -> {
try {
- if (recoverFromStore()) {
+ final Repository repository = repositoriesService.repository(recoverySource.snapshot().getRepository());
+ if (restoreFromRepository(repository)) {
recoveryListener.onRecoveryDone(recoveryState);
}
} catch (Exception e) {
@@ -1467,7 +1527,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
threadPool.generic().execute(() -> {
try {
final Set<ShardId> shards = IndexMetaData.selectShrinkShards(shardId().id(), sourceIndexService.getMetaData(),
- + indexMetaData.getNumberOfShards());
+ +indexMetaData.getNumberOfShards());
if (recoverFromLocalShards(mappingUpdateConsumer, startedShards.stream()
.filter((s) -> shards.contains(s.shardId())).collect(Collectors.toList()))) {
recoveryListener.onRecoveryDone(recoveryState);
@@ -1489,22 +1549,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
}
break;
- case SNAPSHOT:
- markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread
- threadPool.generic().execute(() -> {
- try {
- final Repository repository = repositoriesService.repository(
- recoveryState.getRestoreSource().snapshot().getRepository());
- if (restoreFromRepository(repository)) {
- recoveryListener.onRecoveryDone(recoveryState);
- }
- } catch (Exception first) {
- recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, first), true);
- }
- });
- break;
default:
- throw new IllegalArgumentException("Unknown recovery type " + recoveryState.getType());
+ throw new IllegalArgumentException("Unknown recovery source " + recoveryState.getRecoverySource());
}
}
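In the new shape every recovery path is keyed off the shard's RecoverySource, and each source kind owns its (mostly async) recovery routine. A condensed sketch of the dispatch, with runnables standing in for the actual recovery calls:

import java.util.concurrent.Executor;

final class RecoveryDispatch {
    enum SourceType { EMPTY_STORE, EXISTING_STORE, PEER, SNAPSHOT, LOCAL_SHARDS }

    static void start(SourceType type, Executor generic, Runnable storeRecovery,
                      Runnable peerRecovery, Runnable snapshotRestore, Runnable localShardsRecovery) {
        switch (type) {
            case EMPTY_STORE:
            case EXISTING_STORE:
                generic.execute(storeRecovery);        // recover from the local store off-thread
                break;
            case PEER:
                peerRecovery.run();                    // delegated to the peer recovery target service
                break;
            case SNAPSHOT:
                generic.execute(snapshotRestore);      // restore from a repository off-thread
                break;
            case LOCAL_SHARDS:
                generic.execute(localShardsRecovery);  // e.g. shrink: recover from sibling shards
                break;
            default:
                throw new IllegalArgumentException("Unknown recovery source " + type);
        }
    }
}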
@@ -1532,8 +1578,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
throw new EngineClosedException(shardId);
}
assert this.currentEngineReference.get() == null;
- this.currentEngineReference.set(newEngine(config));
-
+ Engine engine = newEngine(config);
+ onNewEngine(engine); // call this before we pass the memory barrier otherwise actions that happen
+ // inside the callback are not visible. This one enforces happens-before
+ this.currentEngineReference.set(engine);
}
// time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during which
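The reordering above is a safe-publication idiom: fully wire the engine (onNewEngine) before the volatile write that makes it visible, so any thread that reads the reference also sees the listener wiring. A self-contained sketch:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

final class EnginePublisher<E> {
    private final AtomicReference<E> current = new AtomicReference<>();

    void publish(E engine, Consumer<E> onNewEngine) {
        onNewEngine.accept(engine); // runs before the memory barrier below
        current.set(engine);        // volatile write: everything above happens-before
                                    // any subsequent read of the reference
    }

    E get() {
        return current.get();       // volatile read
    }
}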
@@ -1582,12 +1630,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
return mapperService.documentMapperWithAutoCreate(type);
}
- private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode) {
+ private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode, long maxUnsafeAutoIdTimestamp) {
final IndexShardRecoveryPerformer translogRecoveryPerformer = new IndexShardRecoveryPerformer(shardId, mapperService, logger);
return new EngineConfig(openMode, shardId,
threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(),
mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig,
- IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners);
+ IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners,
+ maxUnsafeAutoIdTimestamp);
}
/**
@@ -1622,19 +1671,34 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
return indexShardOperationsLock.getActiveOperationsCount(); // refCount is incremented on successful acquire and decremented on close
}
+ private final AsyncIOProcessor<Translog.Location> translogSyncProcessor = new AsyncIOProcessor<Translog.Location>(logger, 1024) {
+ @Override
+ protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candidates) throws IOException {
+ try {
+ final Engine engine = getEngine();
+ engine.getTranslog().ensureSynced(candidates.stream().map(Tuple::v1));
+ } catch (EngineClosedException ex) {
+ // that's fine since we already synced everything on engine close - this also conforms with the method's
+ // documentation
+ } catch (IOException ex) { // if this fails we are in deep shit - fail the request
+ logger.debug("failed to sync translog", ex);
+ throw ex;
+ }
+ }
+ };
+
/**
- * Syncs the given location with the underlying storage unless already synced.
+ * Syncs the given location with the underlying storage unless already synced. This method might return immediately without
+ * actually fsyncing the location until the sync listener is called. Yet, unless there is already another thread fsyncing
+ * the transaction log, the caller thread will be hijacked to run the fsync for all pending fsync operations.
+ * This method allows indexing threads to continue indexing without blocking on fsync calls. We ensure that there is only
+ * one thread blocking on the sync and all others can continue indexing.
+ * NOTE: if the syncListener throws an exception when it's processed the exception will only be logged. Users should make sure that the
+ * listener handles all exception cases internally.
*/
- public void sync(Translog.Location location) {
- try {
- final Engine engine = getEngine();
- engine.getTranslog().ensureSynced(location);
- } catch (EngineClosedException ex) {
- // that's fine since we already synced everything on engine close - this also is conform with the methods documentation
- } catch (IOException ex) { // if this fails we are in deep shit - fail the request
- logger.debug("failed to sync translog", ex);
- throw new ElasticsearchException("failed to sync translog", ex);
- }
+ public final void sync(Translog.Location location, Consumer<Exception> syncListener) {
+ verifyNotClosed();
+ translogSyncProcessor.put(location, syncListener);
}
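The batching idea behind AsyncIOProcessor can be shown in a condensed, standalone form: callers enqueue (location, listener) pairs and return immediately; whichever thread wins the tryLock drains the queue and runs one fsync for the whole batch. This is a simplification of the ES class, which also handles the race where items arrive between the drain and the unlock:

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;

abstract class BatchedSyncProcessor<T> {
    private final ConcurrentLinkedQueue<Map.Entry<T, Consumer<Exception>>> queue =
        new ConcurrentLinkedQueue<>();
    private final ReentrantLock lock = new ReentrantLock();

    void put(T item, Consumer<Exception> listener) {
        queue.add(new SimpleEntry<>(item, listener));
        if (lock.tryLock()) { // only one thread is hijacked to do the I/O
            try {
                drainAndWrite();
            } finally {
                lock.unlock();
            }
        }
    }

    private void drainAndWrite() {
        final List<Map.Entry<T, Consumer<Exception>>> batch = new ArrayList<>();
        Map.Entry<T, Consumer<Exception>> entry;
        while ((entry = queue.poll()) != null) {
            batch.add(entry);
        }
        if (batch.isEmpty()) {
            return;
        }
        Exception failure = null;
        try {
            write(batch); // one fsync covers the whole batch
        } catch (Exception e) {
            failure = e;
        }
        for (Map.Entry<T, Consumer<Exception>> e : batch) {
            e.getValue().accept(failure); // null means success
        }
    }

    protected abstract void write(List<Map.Entry<T, Consumer<Exception>>> batch) throws Exception;
}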
/**
@@ -1748,7 +1812,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private class IndexShardRecoveryPerformer extends TranslogRecoveryPerformer {
- protected IndexShardRecoveryPerformer(ShardId shardId, MapperService mapperService, ESLogger logger) {
+ protected IndexShardRecoveryPerformer(ShardId shardId, MapperService mapperService, Logger logger) {
super(shardId, mapperService, logger);
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java
index a0f2714a3a..cde14dec17 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java
@@ -18,11 +18,11 @@
*/
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
@@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class IndexShardOperationsLock implements Closeable {
private final ShardId shardId;
- private final ESLogger logger;
+ private final Logger logger;
private final ThreadPool threadPool;
private static final int TOTAL_PERMITS = Integer.MAX_VALUE;
@@ -44,7 +44,7 @@ public class IndexShardOperationsLock implements Closeable {
@Nullable private List<ActionListener<Releasable>> delayedOperations; // operations that are delayed due to relocation hand-off
private volatile boolean closed;
- public IndexShardOperationsLock(ShardId shardId, ESLogger logger, ThreadPool threadPool) {
+ public IndexShardOperationsLock(ShardId shardId, Logger logger, ThreadPool threadPool) {
this.shardId = shardId;
this.logger = logger;
this.threadPool = threadPool;
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java
index 13ff87d418..042ddec924 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java
@@ -18,7 +18,9 @@
*/
package org.elasticsearch.index.shard;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.index.engine.Engine;
import java.util.List;
@@ -68,9 +70,9 @@ public interface IndexingOperationListener {
*/
final class CompositeListener implements IndexingOperationListener{
private final List<IndexingOperationListener> listeners;
- private final ESLogger logger;
+ private final Logger logger;
- public CompositeListener(List<IndexingOperationListener> listeners, ESLogger logger) {
+ public CompositeListener(List<IndexingOperationListener> listeners, Logger logger) {
this.listeners = listeners;
this.logger = logger;
}
@@ -82,7 +84,7 @@ public interface IndexingOperationListener {
try {
listener.preIndex(operation);
} catch (Exception e) {
- logger.warn("preIndex listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e);
}
}
return operation;
@@ -95,7 +97,7 @@ public interface IndexingOperationListener {
try {
listener.postIndex(index, created);
} catch (Exception e) {
- logger.warn("postIndex listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e);
}
}
}
@@ -108,7 +110,7 @@ public interface IndexingOperationListener {
listener.postIndex(index, ex);
} catch (Exception inner) {
inner.addSuppressed(ex);
- logger.warn("postIndex listener [{}] failed", inner, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner);
}
}
}
@@ -120,7 +122,7 @@ public interface IndexingOperationListener {
try {
listener.preDelete(delete);
} catch (Exception e) {
- logger.warn("preDelete listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e);
}
}
return delete;
@@ -133,7 +135,7 @@ public interface IndexingOperationListener {
try {
listener.postDelete(delete);
} catch (Exception e) {
- logger.warn("postDelete listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e);
}
}
}
@@ -146,7 +148,7 @@ public interface IndexingOperationListener {
listener.postDelete(delete, ex);
} catch (Exception inner) {
inner.addSuppressed(ex);
- logger.warn("postDelete listener [{}] failed", inner, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner);
}
}
}
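The logging change repeated throughout this diff is the Log4j 2 lazy-message idiom: passing a Supplier<?> defers building the ParameterizedMessage until the level is actually enabled, and keeps the Throwable as the separate last argument. A minimal example of the pattern:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

final class LazyWarnExample {
    private static final Logger logger = LogManager.getLogger(LazyWarnExample.class);

    static void notifyFailed(Object listener, Exception e) {
        // message construction is skipped entirely when WARN is disabled
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("listener [{}] failed", listener), e);
    }
}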
diff --git a/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java
index 0d53163f15..7b79f785ff 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java
@@ -85,7 +85,7 @@ final class LocalShardSnapshot implements Closeable {
}
@Override
- public void renameFile(String source, String dest) throws IOException {
+ public void rename(String source, String dest) throws IOException {
throw new UnsupportedOperationException("this directory is read-only");
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java
index 76352e79bb..ca94f1ea96 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java
@@ -19,9 +19,9 @@
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.ReferenceManager;
import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.translog.Translog;
import java.io.IOException;
@@ -41,7 +41,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
private final IntSupplier getMaxRefreshListeners;
private final Runnable forceRefresh;
private final Executor listenerExecutor;
- private final ESLogger logger;
+ private final Logger logger;
/**
* List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed
@@ -54,7 +54,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
*/
private volatile Translog.Location lastRefreshedLocation;
- public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, ESLogger logger) {
+ public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, Logger logger) {
this.getMaxRefreshListeners = getMaxRefreshListeners;
this.forceRefresh = forceRefresh;
this.listenerExecutor = listenerExecutor;
diff --git a/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java b/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java
index 5a4ac1297f..11723c3d50 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java
@@ -18,7 +18,9 @@
*/
package org.elasticsearch.index.shard;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.search.internal.SearchContext;
import java.util.List;
@@ -107,9 +109,9 @@ public interface SearchOperationListener {
*/
final class CompositeListener implements SearchOperationListener {
private final List<SearchOperationListener> listeners;
- private final ESLogger logger;
+ private final Logger logger;
- public CompositeListener(List<SearchOperationListener> listeners, ESLogger logger) {
+ public CompositeListener(List<SearchOperationListener> listeners, Logger logger) {
this.listeners = listeners;
this.logger = logger;
}
@@ -120,7 +122,7 @@ public interface SearchOperationListener {
try {
listener.onPreQueryPhase(searchContext);
} catch (Exception e) {
- logger.warn("onPreQueryPhase listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e);
}
}
}
@@ -131,7 +133,7 @@ public interface SearchOperationListener {
try {
listener.onFailedQueryPhase(searchContext);
} catch (Exception e) {
- logger.warn("onFailedQueryPhase listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e);
}
}
}
@@ -142,7 +144,7 @@ public interface SearchOperationListener {
try {
listener.onQueryPhase(searchContext, tookInNanos);
} catch (Exception e) {
- logger.warn("onQueryPhase listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e);
}
}
}
@@ -153,7 +155,7 @@ public interface SearchOperationListener {
try {
listener.onPreFetchPhase(searchContext);
} catch (Exception e) {
- logger.warn("onPreFetchPhase listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e);
}
}
}
@@ -164,7 +166,7 @@ public interface SearchOperationListener {
try {
listener.onFailedFetchPhase(searchContext);
} catch (Exception e) {
- logger.warn("onFailedFetchPhase listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e);
}
}
}
@@ -175,7 +177,7 @@ public interface SearchOperationListener {
try {
listener.onFetchPhase(searchContext, tookInNanos);
} catch (Exception e) {
- logger.warn("onFetchPhase listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e);
}
}
}
@@ -186,7 +188,7 @@ public interface SearchOperationListener {
try {
listener.onNewContext(context);
} catch (Exception e) {
- logger.warn("onNewContext listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e);
}
}
}
@@ -197,7 +199,7 @@ public interface SearchOperationListener {
try {
listener.onFreeContext(context);
} catch (Exception e) {
- logger.warn("onFreeContext listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e);
}
}
}
@@ -208,7 +210,7 @@ public interface SearchOperationListener {
try {
listener.onNewScrollContext(context);
} catch (Exception e) {
- logger.warn("onNewScrollContext listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e);
}
}
}
@@ -219,7 +221,7 @@ public interface SearchOperationListener {
try {
listener.onFreeScrollContext(context);
} catch (Exception e) {
- logger.warn("onFreeScrollContext listener [{}] failed", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java
index 45a471e1aa..11023a6a13 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java
@@ -114,4 +114,9 @@ public final class ShadowIndexShard extends IndexShard {
public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
throw new UnsupportedOperationException("can't snapshot the directory as the primary may change it underneath us");
}
+
+ @Override
+ protected void onNewEngine(Engine newEngine) {
+ // nothing to do here - the superclass sets the translog on some listeners but we don't have such a thing
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java
index fa2c8ce710..aa46240fd4 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java
@@ -33,10 +33,18 @@ public class ShardNotFoundException extends ResourceNotFoundException {
}
public ShardNotFoundException(ShardId shardId, Throwable ex) {
- super("no such shard", ex);
- setShard(shardId);
+ this(shardId, "no such shard", ex);
+ }
+
+ public ShardNotFoundException(ShardId shardId, String msg, Object... args) {
+ this(shardId, msg, null, args);
+ }
+ public ShardNotFoundException(ShardId shardId, String msg, Throwable ex, Object... args) {
+ super(msg, ex, args);
+ setShard(shardId);
}
+
public ShardNotFoundException(StreamInput in) throws IOException{
super(in);
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java
index 154619951f..23b17c290f 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java
@@ -18,9 +18,9 @@
*/
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.IndexSettings;
@@ -108,7 +108,7 @@ public final class ShardPath {
* directories with a valid shard state exist the one with the highest version will be used.
* <b>Note:</b> this method resolves custom data locations for the shard.
*/
- public static ShardPath loadShardPath(ESLogger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException {
+ public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException {
final String indexUUID = indexSettings.getUUID();
final Path[] paths = env.availableShardPaths(shardId);
Path loadedPath = null;
@@ -146,7 +146,7 @@ public final class ShardPath {
* This method tries to delete left-over shards where the index name has been reused but the UUID is different
* to allow the new shard to be allocated.
*/
- public static void deleteLeftoverShardDirectory(ESLogger logger, NodeEnvironment env, ShardLock lock, IndexSettings indexSettings) throws IOException {
+ public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env, ShardLock lock, IndexSettings indexSettings) throws IOException {
final String indexUUID = indexSettings.getUUID();
final Path[] paths = env.availableShardPaths(lock.getShardId());
for (Path path : paths) {
diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
index e08130d9d8..44b4ed933f 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
@@ -20,6 +20,7 @@
package org.elasticsearch.index.shard;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
@@ -29,11 +30,11 @@ import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.cluster.routing.RestoreSource;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@@ -62,10 +63,10 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
*/
final class StoreRecovery {
- private final ESLogger logger;
+ private final Logger logger;
private final ShardId shardId;
- StoreRecovery(ShardId shardId, ESLogger logger) {
+ StoreRecovery(ShardId shardId, Logger logger) {
this.logger = logger;
this.shardId = shardId;
}
@@ -75,19 +76,18 @@ final class StoreRecovery {
* exist on disk, i.e. has been previously allocated, or if the shard is a brand new allocation without pre-existing index
* files / transaction logs. This
* @param indexShard the index shard instance to recover the shard into
- * @param indexShouldExists <code>true</code> iff the index should exist on disk ie. has the shard been allocated previously on the shards store.
* @return <code>true</code> if the shard has been recovered successfully, <code>false</code> if the recovery
* has been ignored due to a concurrent modification or if the cluster's state has changed due to async updates.
* @see Store
*/
- boolean recoverFromStore(final IndexShard indexShard, final boolean indexShouldExists) {
+ boolean recoverFromStore(final IndexShard indexShard) {
if (canRecover(indexShard)) {
- if (indexShard.routingEntry().restoreSource() != null) {
- throw new IllegalStateException("can't recover - restore source is not null");
- }
+ RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
+ assert recoveryType == RecoverySource.Type.EMPTY_STORE || recoveryType == RecoverySource.Type.EXISTING_STORE :
+ "expected store recovery type but was: " + recoveryType;
return executeRecovery(indexShard, () -> {
logger.debug("starting recovery from store ...");
- internalRecoverFromStore(indexShard, indexShouldExists);
+ internalRecoverFromStore(indexShard);
});
}
return false;
@@ -95,10 +95,8 @@ final class StoreRecovery {
boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer, final IndexShard indexShard, final List<LocalShardSnapshot> shards) throws IOException {
if (canRecover(indexShard)) {
- assert indexShard.recoveryState().getType() == RecoveryState.Type.LOCAL_SHARDS : "invalid recovery type: " + indexShard.recoveryState().getType();
- if (indexShard.routingEntry().restoreSource() != null) {
- throw new IllegalStateException("can't recover - restore source is not null");
- }
+ RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
+ assert recoveryType == RecoverySource.Type.LOCAL_SHARDS: "expected local shards recovery type: " + recoveryType;
if (shards.isEmpty()) {
throw new IllegalArgumentException("shards must not be empty");
}
@@ -118,7 +116,7 @@ final class StoreRecovery {
final Directory directory = indexShard.store().directory(); // don't close this directory!!
addIndices(indexShard.recoveryState().getIndex(), directory, shards.stream().map(s -> s.getSnapshotDirectory())
.collect(Collectors.toList()).toArray(new Directory[shards.size()]));
- internalRecoverFromStore(indexShard, true);
+ internalRecoverFromStore(indexShard);
// just trigger a merge to do housekeeping on the
// copied segments - we will also see them in stats etc.
indexShard.getEngine().forceMerge(false, -1, false, false, false);
@@ -229,13 +227,12 @@ final class StoreRecovery {
*/
boolean recoverFromRepository(final IndexShard indexShard, Repository repository) {
if (canRecover(indexShard)) {
- final ShardRouting shardRouting = indexShard.routingEntry();
- if (shardRouting.restoreSource() == null) {
- throw new IllegalStateException("can't restore - restore source is null");
- }
+ RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
+ assert recoveryType == RecoverySource.Type.SNAPSHOT : "expected snapshot recovery type: " + recoveryType;
+ SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource();
return executeRecovery(indexShard, () -> {
- logger.debug("restoring from {} ...", shardRouting.restoreSource());
- restore(indexShard, repository);
+ logger.debug("restoring from {} ...", indexShard.recoveryState().getRecoverySource());
+ restore(indexShard, repository, recoverySource);
});
}
return false;
@@ -308,8 +305,9 @@ final class StoreRecovery {
/**
* Recovers the state of the shard from the store.
*/
- private void internalRecoverFromStore(IndexShard indexShard, boolean indexShouldExists) throws IndexShardRecoveryException {
+ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRecoveryException {
final RecoveryState recoveryState = indexShard.recoveryState();
+ final boolean indexShouldExists = recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE;
indexShard.prepareForIndexRecovery();
long version = -1;
SegmentInfos si = null;
@@ -340,25 +338,26 @@ final class StoreRecovery {
// it's a "new index create" API, we have to do something, so better to clean it than to use the same data
logger.trace("cleaning existing shard, shouldn't exist");
Lucene.cleanLuceneIndex(store.directory());
+ si = null;
}
}
} catch (Exception e) {
throw new IndexShardRecoveryException(shardId, "failed to fetch index version after copying it over", e);
}
recoveryState.getIndex().updateVersion(version);
- // since we recover from local, just fill the files and size
- try {
- final RecoveryState.Index index = recoveryState.getIndex();
- if (si != null && recoveryState.getType() == RecoveryState.Type.STORE) {
- addRecoveredFileDetails(si, store, index);
- }
- } catch (IOException e) {
- logger.debug("failed to list file details", e);
- }
- if (recoveryState.getType() == RecoveryState.Type.LOCAL_SHARDS) {
+ if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
assert indexShouldExists;
- indexShard.skipTranslogRecovery();
+ indexShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
} else {
+ // since we recover from local, just fill the files and size
+ try {
+ final RecoveryState.Index index = recoveryState.getIndex();
+ if (si != null) {
+ addRecoveredFileDetails(si, store, index);
+ }
+ } catch (IOException e) {
+ logger.debug("failed to list file details", e);
+ }
indexShard.performTranslogRecovery(indexShouldExists);
}
indexShard.finalizeRecovery();
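Deriving indexShouldExists from the recovery source removes a caller-supplied flag that could disagree with the routing. The rule compresses to one predicate; the enum below is a stand-in for RecoverySource.Type:

final class StoreRecoveryRules {
    enum SourceType { EMPTY_STORE, EXISTING_STORE, SNAPSHOT, LOCAL_SHARDS, PEER }

    // only a brand-new empty store may legitimately start without index files
    static boolean indexShouldExist(SourceType type) {
        return type != SourceType.EMPTY_STORE;
    }
}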
@@ -379,10 +378,9 @@ final class StoreRecovery {
}
/**
- * Restores shard from {@link RestoreSource} associated with this shard in routing table
+ * Restores shard from {@link SnapshotRecoverySource} associated with this shard in routing table
*/
- private void restore(final IndexShard indexShard, final Repository repository) {
- RestoreSource restoreSource = indexShard.routingEntry().restoreSource();
+ private void restore(final IndexShard indexShard, final Repository repository, final SnapshotRecoverySource restoreSource) {
final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog();
if (restoreSource == null) {
throw new IndexShardRestoreFailedException(shardId, "empty restore source");
@@ -401,7 +399,7 @@ final class StoreRecovery {
}
final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName);
repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
- indexShard.skipTranslogRecovery();
+ indexShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
indexShard.finalizeRecovery();
indexShard.postRecovery("restore done");
} catch (Exception e) {
diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java
index 78628a02c4..64ae0c7700 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java
@@ -18,10 +18,10 @@
*/
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException;
import org.elasticsearch.index.mapper.DocumentMapperForType;
@@ -44,11 +44,11 @@ import static org.elasticsearch.index.mapper.SourceToParse.source;
*/
public class TranslogRecoveryPerformer {
private final MapperService mapperService;
- private final ESLogger logger;
+ private final Logger logger;
private final Map<String, Mapping> recoveredTypes = new HashMap<>();
private final ShardId shardId;
- protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, ESLogger logger) {
+ protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, Logger logger) {
this.shardId = shardId;
this.mapperService = mapperService;
this.logger = logger;
@@ -147,13 +147,16 @@ public class TranslogRecoveryPerformer {
* is encountered.
*/
private void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates, Engine.Operation.Origin origin) {
+
try {
switch (operation.opType()) {
case INDEX:
Translog.Index index = (Translog.Index) operation;
+ // we set canHaveDuplicates to true all the time such that we de-optimize the translog case and ensure that all
+ // autoGeneratedID docs that are coming from the primary are updated correctly.
Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), source(shardId.getIndexName(), index.type(), index.id(), index.source())
.routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl()),
- index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin);
+ index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true);
maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates);
if (logger.isTraceEnabled()) {
logger.trace("[translog] recover [index] op of [{}][{}]", index.type(), index.id());
diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java
index 7bb81a0109..a8b7fafb98 100644
--- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java
+++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java
@@ -128,26 +128,15 @@ public final class SimilarityService extends AbstractIndexComponent {
static class PerFieldSimilarity extends PerFieldSimilarityWrapper {
private final Similarity defaultSimilarity;
- private final Similarity baseSimilarity;
private final MapperService mapperService;
PerFieldSimilarity(Similarity defaultSimilarity, Similarity baseSimilarity, MapperService mapperService) {
+ super(baseSimilarity);
this.defaultSimilarity = defaultSimilarity;
- this.baseSimilarity = baseSimilarity;
this.mapperService = mapperService;
}
@Override
- public float coord(int overlap, int maxOverlap) {
- return baseSimilarity.coord(overlap, maxOverlap);
- }
-
- @Override
- public float queryNorm(float valueForNormalization) {
- return baseSimilarity.queryNorm(valueForNormalization);
- }
-
- @Override
public Similarity get(String name) {
MappedFieldType fieldType = mapperService.fullName(name);
return (fieldType != null && fieldType.similarity() != null) ? fieldType.similarity().get() : defaultSimilarity;
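With the coord/queryNorm delegation folded into Lucene's PerFieldSimilarityWrapper constructor (as this diff relies on), a subclass only has to resolve the per-field similarity. A sketch under that assumption; the per-field choice below is illustrative, where the real code consults the field mapping:

import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
import org.apache.lucene.search.similarities.Similarity;

final class PerFieldExample extends PerFieldSimilarityWrapper {
    private final Similarity defaultSim = new BM25Similarity();

    PerFieldExample(Similarity base) {
        super(base); // coord()/queryNorm() now delegate to 'base' inside the wrapper
    }

    @Override
    public Similarity get(String field) {
        // illustrative per-field choice; real code looks up the field's mapping
        return "title".equals(field) ? new ClassicSimilarity() : defaultSim;
    }
}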
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
index 5bb0f728bc..91636b5329 100644
--- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
+++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
@@ -70,7 +70,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
long partBytes = Long.MAX_VALUE;
if (partSize != null) {
- partBytes = partSize.bytes();
+ partBytes = partSize.getBytes();
}
long totalLength = metaData.length();
@@ -261,7 +261,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
builder.field(CHECKSUM, file.metadata.checksum());
}
if (file.partSize != null) {
- builder.field(PART_SIZE, file.partSize.bytes());
+ builder.field(PART_SIZE, file.partSize.getBytes());
}
if (file.metadata.writtenBy() != null) {
diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java
index 9e01d87176..783bd9af58 100644
--- a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java
+++ b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java
@@ -26,9 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardPath;
-/**
- *
- */
+
public class IndexStore extends AbstractIndexComponent {
public static final Setting<IndexRateLimitingType> INDEX_STORE_THROTTLE_TYPE_SETTING =
new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString,
diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java
index 12558bb955..ff1f624070 100644
--- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java
@@ -18,8 +18,8 @@
*/
package org.elasticsearch.index.store;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.store.StoreRateLimiting;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -49,7 +49,7 @@ public class IndexStoreConfig {
private volatile StoreRateLimiting.Type rateLimitingType;
private volatile ByteSizeValue rateLimitingThrottle;
private final StoreRateLimiting rateLimiting = new StoreRateLimiting();
- private final ESLogger logger;
+ private final Logger logger;
public IndexStoreConfig(Settings settings) {
logger = Loggers.getLogger(IndexStoreConfig.class, settings);
// we don't limit by default (we default to CMS's auto throttle instead):
diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java
index e714d8db8b..606510ace4 100644
--- a/core/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/core/src/main/java/org/elasticsearch/index/store/Store.java
@@ -19,6 +19,9 @@
package org.elasticsearch.index.store;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
@@ -54,7 +57,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
@@ -217,7 +219,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* Note that this method requires the caller verify it has the right to access the store and
* no concurrent file changes are happening. If in doubt, you probably want to use one of the following:
*
- * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, ESLogger)} to read a meta data while locking
+ * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking
* {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard
* {@link IndexShard#acquireIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
*
@@ -245,7 +247,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
/**
- * Renames all the given files form the key of the map to the
+ * Renames all the given files from the key of the map to the
* value of the map. All successfully renamed files are removed from the map in-place.
*/
public void renameTempFilesSafe(Map<String, String> tempFileMap) throws IOException {
@@ -279,13 +281,14 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
directory.deleteFile(origFile);
} catch (FileNotFoundException | NoSuchFileException e) {
} catch (Exception ex) {
- logger.debug("failed to delete file [{}]", ex, origFile);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
}
// now, rename the files... and fail if it won't work
- this.renameFile(tempFile, origFile);
+ directory.rename(tempFile, origFile);
final String remove = tempFileMap.remove(tempFile);
assert remove != null;
}
+ directory.syncMetaData();
} finally {
metadataLock.writeLock().unlock();
}
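A recurring change in this commit replaces Elasticsearch's ESLogger with the Log4j 2 Logger, whose methods take the Throwable last rather than before the format arguments; wrapping the message in a Supplier also defers building the ParameterizedMessage until the level is actually enabled. A minimal, self-contained sketch of the adopted pattern (the file name and exception are hypothetical, for illustration only):

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        String origFile = "recovery.tmp"; // hypothetical file name
        Exception ex = new java.io.IOException("simulated failure");
        // Old ESLogger style: logger.debug("failed to delete file [{}]", ex, origFile);
        // Log4j 2 style: the Throwable is the last argument, and the message is
        // only built (via the Supplier) if the debug level is enabled.
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
    }
}
```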
@@ -297,11 +300,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
return statsCache.getOrRefresh();
}
- public void renameFile(String from, String to) throws IOException {
- ensureOpen();
- directory.renameFile(from, to);
- }
-
/**
* Increments the refCount of this Store instance. RefCounts are used to determine when a
* Store can be closed safely, i.e. as soon as there are no more references. Be sure to always call a
@@ -380,7 +378,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* @throws IOException if the index we try to read is corrupted
*/
public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
- ESLogger logger) throws IOException {
+ Logger logger) throws IOException {
try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5));
Directory dir = new SimpleFSDirectory(indexLocation)) {
failIfCorrupted(dir, shardId);
@@ -390,7 +388,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
} catch (FileNotFoundException | NoSuchFileException ex) {
logger.info("Failed to open / find files while reading metadata snapshot");
} catch (ShardLockObtainFailedException ex) {
- logger.info("{}: failed to obtain shard lock", ex, shardId);
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex);
}
return MetadataSnapshot.EMPTY;
}
@@ -400,11 +398,11 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* can be successfully opened. This includes reading the segment infos and possible
* corruption markers.
*/
- public static boolean canOpenIndex(ESLogger logger, Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException {
+ public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException {
try {
tryOpenIndex(indexLocation, shardId, shardLocker, logger);
} catch (Exception ex) {
- logger.trace("Can't open index for path [{}]", ex, indexLocation);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex);
return false;
}
return true;
@@ -415,14 +413,14 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* segment infos and possible corruption markers. If the index cannot
* be opened, an exception is thrown
*/
- public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, ESLogger logger) throws IOException {
+ public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException {
try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5));
Directory dir = new SimpleFSDirectory(indexLocation)) {
failIfCorrupted(dir, shardId);
SegmentInfos segInfo = Lucene.readSegmentInfos(dir);
logger.trace("{} loaded segment info [{}]", shardId, segInfo);
} catch (ShardLockObtainFailedException ex) {
- logger.error("{} unable to acquire shard lock", ex, shardId);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unable to acquire shard lock", shardId), ex);
throw new IOException(ex);
}
}
@@ -609,7 +607,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
// if one of those files can't be deleted we better fail the cleanup, otherwise we might leave an old commit point around
throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
}
- logger.debug("failed to delete file [{}]", ex, existingFile);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
// ignore, we don't really care, will get deleted later on
}
}
@@ -656,9 +654,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
static final class StoreDirectory extends FilterDirectory {
- private final ESLogger deletesLogger;
+ private final Logger deletesLogger;
- StoreDirectory(Directory delegateDirectory, ESLogger deletesLogger) throws IOException {
+ StoreDirectory(Directory delegateDirectory, Logger deletesLogger) throws IOException {
super(delegateDirectory);
this.deletesLogger = deletesLogger;
}
@@ -721,7 +719,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
numDocs = 0;
}
- MetadataSnapshot(IndexCommit commit, Directory directory, ESLogger logger) throws IOException {
+ MetadataSnapshot(IndexCommit commit, Directory directory, Logger logger) throws IOException {
LoadedMetadata loadedMetadata = loadMetadata(commit, directory, logger);
metadata = loadedMetadata.fileMetadata;
commitUserData = loadedMetadata.userData;
@@ -784,7 +782,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}
- static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, ESLogger logger) throws IOException {
+ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logger logger) throws IOException {
long numDocs;
Map<String, StoreFileMetaData> builder = new HashMap<>();
Map<String, String> commitUserDataBuilder = new HashMap<>();
@@ -827,8 +825,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
// Lucene checks the checksum after it tries to lookup the codec etc.
// in that case we might get only IAE or similar exceptions while we are really corrupt...
// TODO we should check the checksum in lucene if we hit an exception
- logger.warn("failed to build store metadata. checking segment info integrity (with commit [{}])",
- ex, commit == null ? "no" : "yes");
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex);
Lucene.checkSegmentInfoIntegrity(directory);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) {
cex.addSuppressed(ex);
@@ -843,7 +840,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
private static void checksumFromLuceneFile(Directory directory, String file, Map<String, StoreFileMetaData> builder,
- ESLogger logger, Version version, boolean readFileAsHash) throws IOException {
+ Logger logger, Version version, boolean readFileAsHash) throws IOException {
final String checksum;
final BytesRefBuilder fileHash = new BytesRefBuilder();
try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
@@ -863,7 +860,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
} catch (Exception ex) {
- logger.debug("Can retrieve checksum from file [{}]", ex, file);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("Can't retrieve checksum from file [{}]", file), ex);
throw ex;
}
builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get()));
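For context on checksumFromLuceneFile above: every file Lucene writes ends in a codec footer carrying a CRC32 checksum, which the store metadata reads back per file. A rough standalone sketch against a Lucene 6-era API, assuming a hypothetical index at /tmp/index; the real method also formats the value via digestToString and additionally hashes small files:

```java
import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.SimpleFSDirectory;

public class ChecksumSketch {
    // Read back the CRC32 that Lucene stores in each file's codec footer.
    static long readChecksum(Directory directory, String file) throws IOException {
        try (IndexInput in = directory.openInput(file, IOContext.READONCE)) {
            return CodecUtil.retrieveChecksum(in);
        }
    }

    public static void main(String[] args) throws IOException {
        // "/tmp/index" is a hypothetical path to an existing Lucene index.
        try (Directory dir = new SimpleFSDirectory(Paths.get("/tmp/index"))) {
            for (String file : dir.listAll()) {
                if ("write.lock".equals(file)) {
                    continue; // the lock file has no codec footer
                }
                System.out.println(file + " -> " + readChecksum(dir, file));
            }
        }
    }
}
```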
diff --git a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
index daaf01999b..671178dfcc 100644
--- a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
+++ b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
@@ -35,6 +35,8 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.get.GetField;
import org.elasticsearch.index.get.GetResult;
@@ -44,6 +46,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.StringFieldMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.Uid;
@@ -55,8 +58,10 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
@@ -82,16 +87,9 @@ public class TermVectorsService {
Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), uidTerm).version(request.version()).versionType(request.versionType()));
Fields termVectorsByField = null;
- boolean docFromTranslog = get.source() != null;
AggregatedDfs dfs = null;
TermVectorsFilter termVectorsFilter = null;
- /* fetched from translog is treated as an artificial document */
- if (docFromTranslog) {
- request.doc(get.source().source, false);
- termVectorsResponse.setDocVersion(get.version());
- }
-
/* handle potential wildcards in fields */
if (request.selectedFields() != null) {
handleFieldWildcards(indexShard, request);
@@ -103,12 +101,12 @@ public class TermVectorsService {
Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
/* from an artificial document */
if (request.doc() != null) {
- termVectorsByField = generateTermVectorsFromDoc(indexShard, request, !docFromTranslog);
+ termVectorsByField = generateTermVectorsFromDoc(indexShard, request);
// if no document indexed in shard, take the queried document itself for stats
if (topLevelFields == null) {
topLevelFields = termVectorsByField;
}
- termVectorsResponse.setArtificial(!docFromTranslog);
+ termVectorsResponse.setArtificial(true);
termVectorsResponse.setExists(true);
}
/* or from an existing document */
@@ -198,9 +196,11 @@ public class TermVectorsService {
}
/* generate term vectors from fetched document fields */
+ String[] getFields = validFields.toArray(new String[validFields.size() + 1]);
+ getFields[getFields.length - 1] = SourceFieldMapper.NAME;
GetResult getResult = indexShard.getService().get(
- get, request.id(), request.type(), validFields.toArray(Strings.EMPTY_ARRAY), null);
- Fields generatedTermVectors = generateTermVectors(indexShard, getResult.getFields().values(), request.offsets(), request.perFieldAnalyzer(), validFields);
+ get, request.id(), request.type(), getFields, null);
+ Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(), request.offsets(), request.perFieldAnalyzer(), validFields);
/* merge with existing Fields */
if (termVectorsByField == null) {
@@ -214,12 +214,12 @@ public class TermVectorsService {
MapperService mapperService = indexShard.mapperService();
Analyzer analyzer;
if (perFieldAnalyzer != null && perFieldAnalyzer.containsKey(field)) {
- analyzer = mapperService.analysisService().analyzer(perFieldAnalyzer.get(field).toString());
+ analyzer = mapperService.getIndexAnalyzers().get(perFieldAnalyzer.get(field).toString());
} else {
analyzer = mapperService.fullName(field).indexAnalyzer();
}
if (analyzer == null) {
- analyzer = mapperService.analysisService().defaultIndexAnalyzer();
+ analyzer = mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer();
}
return analyzer;
}
@@ -234,25 +234,43 @@ public class TermVectorsService {
return selectedFields;
}
- private static Fields generateTermVectors(IndexShard indexShard, Collection<GetField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer, Set<String> fields) throws IOException {
- /* store document in memory index */
- MemoryIndex index = new MemoryIndex(withOffsets);
+ private static Fields generateTermVectors(IndexShard indexShard, Map<String, Object> source, Collection<GetField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer, Set<String> fields) throws IOException {
+ Map<String, Collection<Object>> values = new HashMap<>();
for (GetField getField : getFields) {
String field = getField.getName();
- if (fields.contains(field) == false) {
- // some fields are returned even when not asked for, eg. _timestamp
- continue;
+ if (fields.contains(field)) { // some fields are returned even when not asked for, eg. _timestamp
+ values.put(field, getField.getValues());
+ }
+ }
+ if (source != null) {
+ for (String field : fields) {
+ if (values.containsKey(field) == false) {
+ List<Object> v = XContentMapValues.extractRawValues(field, source);
+ if (v.isEmpty() == false) {
+ values.put(field, v);
+ }
+ }
}
+ }
+
+ /* store document in memory index */
+ MemoryIndex index = new MemoryIndex(withOffsets);
+ for (Map.Entry<String, Collection<Object>> entry : values.entrySet()) {
+ String field = entry.getKey();
Analyzer analyzer = getAnalyzerAtField(indexShard, field, perFieldAnalyzer);
- for (Object text : getField.getValues()) {
- index.addField(field, text.toString(), analyzer);
+ if (entry.getValue() instanceof List) {
+ for (Object text : entry.getValue()) {
+ index.addField(field, text.toString(), analyzer);
+ }
+ } else {
+ index.addField(field, entry.getValue().toString(), analyzer);
}
}
/* and read vectors from it */
return MultiFields.getFields(index.createSearcher().getIndexReader());
}
- private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request, boolean doAllFields) throws IOException {
+ private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException {
// parse the document, at the moment we do update the mapping, just like percolate
ParsedDocument parsedDocument = parseDocument(indexShard, indexShard.shardId().getIndexName(), request.type(), request.doc());
@@ -265,9 +283,6 @@ public class TermVectorsService {
if (!isValidField(fieldType)) {
continue;
}
- if (request.selectedFields() == null && !doAllFields && !fieldType.storeTermVectors()) {
- continue;
- }
if (request.selectedFields() != null && !request.selectedFields().contains(field.name())) {
continue;
}
@@ -280,7 +295,7 @@ public class TermVectorsService {
String[] values = doc.getValues(field.name());
getFields.add(new GetField(field.name(), Arrays.asList((Object[]) values)));
}
- return generateTermVectors(indexShard, getFields, request.offsets(), request.perFieldAnalyzer(), seenFields);
+ return generateTermVectors(indexShard, XContentHelper.convertToMap(parsedDocument.source(), true).v2(), getFields, request.offsets(), request.perFieldAnalyzer(), seenFields);
}
private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc) {
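The reworked generateTermVectors feeds the fetched field values into a transient MemoryIndex and then reads term vectors back out of it. A minimal sketch of that round trip with a stock analyzer (the field name and text are made up):

```java
import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.memory.MemoryIndex;

public class MemoryIndexTermVectorsSketch {
    public static void main(String[] args) throws IOException {
        boolean withOffsets = true;
        MemoryIndex index = new MemoryIndex(withOffsets);
        Analyzer analyzer = new StandardAnalyzer();
        // Index a single transient document entirely in memory...
        index.addField("body", "the quick brown fox", analyzer);
        // ...then read its fields back as if they were stored term vectors.
        Fields fields = MultiFields.getFields(index.createSearcher().getIndexReader());
        Terms terms = fields.terms("body");
        System.out.println("unique terms in [body]: " + terms.size());
    }
}
```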
diff --git a/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
index 49e8249e9d..bf61febb74 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
@@ -58,34 +58,21 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
return firstOperationOffset;
}
- public Translog.Operation read(Translog.Location location) throws IOException {
- assert location.generation == generation : "read location's translog generation [" + location.generation + "] is not [" + generation + "]";
- ByteBuffer buffer = ByteBuffer.allocate(location.size);
- try (BufferedChecksumStreamInput checksumStreamInput = checksummedStream(buffer, location.translogLocation, location.size, null)) {
- return read(checksumStreamInput);
- }
- }
-
/** read the size of the op (i.e., number of bytes, including the op size) written at the given position */
- protected final int readSize(ByteBuffer reusableBuffer, long position) {
+ protected final int readSize(ByteBuffer reusableBuffer, long position) throws IOException {
// read op size from disk
assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" + reusableBuffer.capacity() + "]";
- try {
- reusableBuffer.clear();
- reusableBuffer.limit(4);
- readBytes(reusableBuffer, position);
- reusableBuffer.flip();
- // Add an extra 4 to account for the operation size integer itself
- final int size = reusableBuffer.getInt() + 4;
- final long maxSize = sizeInBytes() - position;
- if (size < 0 || size > maxSize) {
- throw new TranslogCorruptedException("operation size is corrupted must be [0.." + maxSize + "] but was: " + size);
- }
-
- return size;
- } catch (IOException e) {
- throw new ElasticsearchException("unexpected exception reading from translog snapshot of " + this.path, e);
+ reusableBuffer.clear();
+ reusableBuffer.limit(4);
+ readBytes(reusableBuffer, position);
+ reusableBuffer.flip();
+ // Add an extra 4 to account for the operation size integer itself
+ final int size = reusableBuffer.getInt() + 4;
+ final long maxSize = sizeInBytes() - position;
+ if (size < 0 || size > maxSize) {
+ throw new TranslogCorruptedException("operation size is corrupted, must be [0.." + maxSize + "] but was: " + size);
}
+ return size;
}
public Translog.Snapshot newSnapshot() {
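readSize above enforces the translog's length-prefixed framing: each operation is stored as a 4-byte size followed by its payload, so the full frame is the stored int plus its own 4 header bytes and must fit within the rest of the file. A simplified in-memory sketch of the same check:

```java
import java.io.IOException;
import java.nio.ByteBuffer;

public class OpSizeSketch {
    // Mirrors BaseTranslogReader#readSize: an operation is a 4-byte length
    // followed by its payload, and the declared size (plus the 4 header bytes)
    // must lie within the remainder of the file or the translog is corrupt.
    static int readSize(ByteBuffer file, long position, long fileSize) throws IOException {
        file.position((int) position);
        final int size = file.getInt() + 4; // add 4 for the size integer itself
        final long maxSize = fileSize - position;
        if (size < 0 || size > maxSize) {
            throw new IOException("operation size is corrupted, must be [0.." + maxSize + "] but was: " + size);
        }
        return size;
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer file = ByteBuffer.allocate(16);
        file.putInt(8);        // payload length
        file.put(new byte[8]); // payload
        System.out.println("frame size: " + readSize(file, 0, 12)); // prints 12
    }
}
```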
diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java
index b6ace07a55..056716a29b 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -19,13 +19,14 @@
package org.elasticsearch.index.translog;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TwoPhaseCommit;
import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -55,11 +56,9 @@ import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
-import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -112,7 +111,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
// the list of translog readers is guaranteed to be in order of translog generation
private final List<TranslogReader> readers = new ArrayList<>();
- private volatile ScheduledFuture<?> syncScheduler;
// this is a concurrent set and is not protected by any of the locks. The main reason
// is that is being accessed by two separate classes (additions & reading are done by Translog, remove by View when closed)
private final Set<View> outstandingViews = ConcurrentCollections.newConcurrentSet();
@@ -260,7 +258,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try {
Files.delete(tempFile);
} catch (IOException ex) {
- logger.warn("failed to delete temp file {}", ex, tempFile);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
}
}
}
@@ -312,7 +310,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
closeFilesIfNoPendingViews();
}
} finally {
- FutureUtils.cancel(syncScheduler);
logger.debug("translog closed");
}
}
@@ -387,31 +384,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return newFile;
}
-
- /**
- * Read the Operation object from the given location. This method will try to read the given location from
- * the current or from the currently committing translog file. If the location is in a file that has already
- * been closed or even removed the method will return <code>null</code> instead.
- */
- public Translog.Operation read(Location location) {
- try (ReleasableLock lock = readLock.acquire()) {
- final BaseTranslogReader reader;
- final long currentGeneration = current.getGeneration();
- if (currentGeneration == location.generation) {
- reader = current;
- } else if (readers.isEmpty() == false && readers.get(readers.size() - 1).getGeneration() == location.generation) {
- reader = readers.get(readers.size() - 1);
- } else if (currentGeneration < location.generation) {
- throw new IllegalStateException("location generation [" + location.generation + "] is greater than the current generation [" + currentGeneration + "]");
- } else {
- return null;
- }
- return reader.read(location);
- } catch (IOException e) {
- throw new ElasticsearchException("failed to read source from translog location " + location, e);
- }
- }
-
/**
* Adds a delete / index operations to the transaction log.
*
@@ -435,7 +407,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
Location location = current.add(bytes);
- assert assertBytesAtLocation(location, bytes);
return location;
}
} catch (AlreadyClosedException | IOException ex) {
@@ -472,12 +443,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
}
- boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException {
- // tests can override this
- ByteBuffer buffer = ByteBuffer.allocate(location.size);
- current.readBytes(buffer, location.translogLocation);
- return new BytesArray(buffer.array()).equals(expectedBytes);
- }
/**
* Snapshots the current transaction log allowing to safely iterate over the snapshot.
@@ -566,6 +531,24 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return false;
}
+ /**
+ * Ensures that all locations in the given stream have been synced / written to the underlying storage.
+ * This method allows for internal optimization to minimize the number of fsync operations if multiple
+ * locations must be synced.
+ *
+ * @return Returns <code>true</code> iff this call caused an actual sync operation, otherwise <code>false</code>
+ */
+ public boolean ensureSynced(Stream<Location> locations) throws IOException {
+ final Optional<Location> max = locations.max(Location::compareTo);
+ // we only need to sync the max location since it will sync all other
+ // locations implicitly
+ if (max.isPresent()) {
+ return ensureSynced(max.get());
+ } else {
+ return false;
+ }
+ }
+
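The new ensureSynced(Stream<Location>) relies on locations being totally ordered: an fsync persists everything written up to a position, so syncing the batch's maximum location implicitly covers all smaller ones. A stripped-down sketch with a simplified Location:

```java
import java.util.Optional;
import java.util.stream.Stream;

public class EnsureSyncedSketch {

    // A simplified stand-in for Translog.Location: ordered first by generation,
    // then by offset within that generation's file.
    static final class Location implements Comparable<Location> {
        final long generation;
        final long translogLocation;

        Location(long generation, long translogLocation) {
            this.generation = generation;
            this.translogLocation = translogLocation;
        }

        @Override
        public int compareTo(Location o) {
            int cmp = Long.compare(generation, o.generation);
            return cmp != 0 ? cmp : Long.compare(translogLocation, o.translogLocation);
        }

        @Override
        public String toString() {
            return "[generation: " + generation + ", location: " + translogLocation + "]";
        }
    }

    // fsync is cumulative: flushing everything up to the largest location also
    // persists every smaller location, so a batch needs at most one sync.
    static boolean ensureSynced(Stream<Location> locations) {
        Optional<Location> max = locations.max(Location::compareTo);
        if (max.isPresent()) {
            System.out.println("fsync up to " + max.get());
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        ensureSynced(Stream.of(new Location(1, 0), new Location(1, 42), new Location(1, 13)));
    }
}
```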
private void closeOnTragicEvent(Exception ex) {
if (current.getTragicException() != null) {
try {
@@ -655,7 +638,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
- public static class Location implements Accountable, Comparable<Location> {
+ public static class Location implements Comparable<Location> {
public final long generation;
public final long translogLocation;
@@ -667,17 +650,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
this.size = size;
}
- @Override
- public long ramBytesUsed() {
- return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * Long.BYTES + Integer.BYTES;
- }
-
- @Override
- public Collection<Accountable> getChildResources() {
- return Collections.emptyList();
- }
-
- @Override
public String toString() {
return "[generation: " + generation + ", location: " + translogLocation + ", size: " + size + "]";
}
@@ -780,7 +752,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
/**
* Reads the type and the operation from the given stream. The operation must be written with
- * {@link #writeType(Operation, StreamOutput)}
+ * {@link Operation#writeType(Operation, StreamOutput)}
*/
static Operation readType(StreamInput input) throws IOException {
Translog.Operation.Type type = Translog.Operation.Type.fromId(input.readByte());
@@ -824,8 +796,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
public static class Index implements Operation {
- public static final int SERIALIZATION_FORMAT = 6; // since 2.0-beta1 and 1.1
+ public static final int FORMAT_2x = 6; // since 2.0-beta1 and 1.1
+ public static final int FORMAT_AUTO_GENERATED_IDS = 7; // since 5.0.0-beta1
+ public static final int SERIALIZATION_FORMAT = FORMAT_AUTO_GENERATED_IDS;
private final String id;
+ private final long autoGeneratedIdTimestamp;
private final String type;
private final long version;
private final VersionType versionType;
@@ -837,7 +812,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
public Index(StreamInput in) throws IOException {
final int format = in.readVInt(); // SERIALIZATION_FORMAT
- assert format == SERIALIZATION_FORMAT : "format was: " + format;
+ assert format >= FORMAT_2x : "format was: " + format;
id = in.readString();
type = in.readString();
source = in.readBytesReference();
@@ -848,6 +823,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
this.ttl = in.readLong();
this.versionType = VersionType.fromValue(in.readByte());
assert versionType.validateVersionForWrites(this.version);
+ if (format >= FORMAT_AUTO_GENERATED_IDS) {
+ this.autoGeneratedIdTimestamp = in.readLong();
+ } else {
+ this.autoGeneratedIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
+ }
}
public Index(Engine.Index index) {
@@ -860,6 +840,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
this.timestamp = index.timestamp();
this.ttl = index.ttl();
this.versionType = index.versionType();
+ this.autoGeneratedIdTimestamp = index.getAutoGeneratedIdTimestamp();
}
public Index(String type, String id, byte[] source) {
@@ -872,6 +853,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
parent = null;
timestamp = 0;
ttl = 0;
+ autoGeneratedIdTimestamp = -1;
}
@Override
@@ -937,6 +919,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
out.writeLong(timestamp);
out.writeLong(ttl);
out.writeByte(versionType.getValue());
+ out.writeLong(autoGeneratedIdTimestamp);
}
@Override
@@ -956,6 +939,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
id.equals(index.id) == false ||
type.equals(index.type) == false ||
versionType != index.versionType ||
+ autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp ||
source.equals(index.source) == false) {
return false;
}
@@ -976,6 +960,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
result = 31 * result + (routing != null ? routing.hashCode() : 0);
result = 31 * result + (parent != null ? parent.hashCode() : 0);
result = 31 * result + Long.hashCode(timestamp);
+ result = 31 * result + Long.hashCode(autoGeneratedIdTimestamp);
result = 31 * result + Long.hashCode(ttl);
return result;
}
@@ -987,6 +972,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
", type='" + type + '\'' +
'}';
}
+
+ public long getAutoGeneratedIdTimestamp() {
+ return autoGeneratedIdTimestamp;
+ }
}
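The Index operation now carries a serialization format number so a 5.x node can read operations written by a 2.x translog and substitute a sentinel for the timestamp that the older format never wrote. A self-contained sketch of the pattern, using plain java.io streams in place of Elasticsearch's StreamInput/StreamOutput:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionedFormatSketch {
    static final int FORMAT_2X = 6;
    static final int FORMAT_AUTO_GENERATED_IDS = 7;
    static final int SERIALIZATION_FORMAT = FORMAT_AUTO_GENERATED_IDS;
    static final long UNSET_AUTO_GENERATED_TIMESTAMP = -1L;

    // Writers always emit the newest format; readers accept any format since 2.x
    // and fall back to a sentinel for fields the older format did not carry.
    static void write(DataOutputStream out, String id, long autoGeneratedIdTimestamp) throws IOException {
        out.writeInt(SERIALIZATION_FORMAT);
        out.writeUTF(id);
        out.writeLong(autoGeneratedIdTimestamp);
    }

    static long read(DataInputStream in) throws IOException {
        final int format = in.readInt();
        assert format >= FORMAT_2X : "format was: " + format;
        String id = in.readUTF(); // present in both formats
        if (format >= FORMAT_AUTO_GENERATED_IDS) {
            return in.readLong();              // field exists in the new format
        }
        return UNSET_AUTO_GENERATED_TIMESTAMP; // sentinel for pre-5.0 operations
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), "doc-1", 1475500000000L);
        long ts = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println("autoGeneratedIdTimestamp = " + ts);
    }
}
```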
public static class Delete implements Operation {
@@ -1192,7 +1181,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
@Override
- public void prepareCommit() throws IOException {
+ public long prepareCommit() throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (currentCommittingGeneration != NOT_SET_GENERATION) {
@@ -1215,10 +1204,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
IOUtils.closeWhileHandlingException(this); // tragic event
throw e;
}
+ return 0L;
}
@Override
- public void commit() throws IOException {
+ public long commit() throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (currentCommittingGeneration == NOT_SET_GENERATION) {
@@ -1231,6 +1221,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
currentCommittingGeneration = NOT_SET_GENERATION;
trimUnreferencedReaders();
}
+ return 0;
}
void trimUnreferencedReaders() {
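prepareCommit and commit now return a long, presumably tracking Lucene's updated TwoPhaseCommit signatures; the translog itself ignores the value and returns 0. A condensed sketch of the generation roll the two phases perform, without any of the file handling:

```java
import java.io.IOException;

public class TwoPhaseCommitSketch {
    static final long NOT_SET_GENERATION = -1;

    private long generation = 1;
    private long currentCommittingGeneration = NOT_SET_GENERATION;

    // Phase 1: freeze the generation being committed and roll to a fresh one;
    // the frozen generation stays readable until commit() releases it.
    public synchronized long prepareCommit() throws IOException {
        if (currentCommittingGeneration != NOT_SET_GENERATION) {
            throw new IllegalStateException("already committing a translog with generation: " + currentCommittingGeneration);
        }
        currentCommittingGeneration = generation;
        generation++;
        System.out.println("rolled to generation " + generation + ", committing " + currentCommittingGeneration);
        return 0L; // the interface returns a long; unused here, as in the diff
    }

    // Phase 2: the commit referencing the new generation succeeded, so the
    // old generation's readers can be trimmed.
    public synchronized long commit() throws IOException {
        if (currentCommittingGeneration == NOT_SET_GENERATION) {
            throw new IllegalStateException("commit called without prepare");
        }
        System.out.println("trimming readers up to generation " + currentCommittingGeneration);
        currentCommittingGeneration = NOT_SET_GENERATION;
        return 0L;
    }

    public static void main(String[] args) throws IOException {
        TwoPhaseCommitSketch translog = new TwoPhaseCommitSketch();
        translog.prepareCommit();
        translog.commit();
    }
}
```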
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java
index f33ec1bd60..a08259ef32 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java
@@ -26,7 +26,7 @@ import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
-public class TranslogSnapshot extends BaseTranslogReader implements Translog.Snapshot {
+final class TranslogSnapshot extends BaseTranslogReader implements Translog.Snapshot {
private final int totalOperations;
protected final long length;
@@ -51,7 +51,7 @@ public class TranslogSnapshot extends BaseTranslogReader implements Translog.Sna
}
@Override
- public final int totalOperations() {
+ public int totalOperations() {
return totalOperations;
}
@@ -64,7 +64,7 @@ public class TranslogSnapshot extends BaseTranslogReader implements Translog.Sna
}
}
- protected final Translog.Operation readOperation() throws IOException {
+ protected Translog.Operation readOperation() throws IOException {
final int opSize = readSize(reusableBuffer, position);
reuse = checksummedStream(reusableBuffer, position, opSize, reuse);
Translog.Operation op = read(reuse);
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java
index eaf50f25a0..3b77466a91 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogToolCli.java
@@ -24,7 +24,6 @@ import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.translog.TruncateTranslogCommand;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
/**
@@ -32,25 +31,13 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer;
*/
public class TranslogToolCli extends MultiCommand {
- public TranslogToolCli() {
+ private TranslogToolCli() {
super("A CLI tool for various Elasticsearch translog actions");
subcommands.put("truncate", new TruncateTranslogCommand());
}
public static void main(String[] args) throws Exception {
- // initialize default for es.logger.level because we will not read the logging.yml
- String loggerLevel = System.getProperty("es.logger.level", "INFO");
- String pathHome = System.getProperty("es.path.home");
- // Set the appender for all potential log files to terminal so that other components that use the logger print out the
- // same terminal.
- Environment loggingEnvironment = InternalSettingsPreparer.prepareEnvironment(Settings.builder()
- .put("path.home", pathHome)
- .put("appender.terminal.type", "terminal")
- .put("rootLogger", "${logger.level}, terminal")
- .put("logger.level", loggerLevel)
- .build(), Terminal.DEFAULT);
- LogConfigurator.configure(loggingEnvironment.settings(), false);
-
exit(new TranslogToolCli().main(args, Terminal.DEFAULT));
}
+
}
diff --git a/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java b/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java
index 57decb25f5..d10a951937 100644
--- a/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java
+++ b/core/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java
@@ -19,7 +19,7 @@
package org.elasticsearch.index.warmer;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.index.IndexSettings;
@@ -28,8 +28,6 @@ import org.elasticsearch.index.shard.ShardId;
import java.util.concurrent.TimeUnit;
-/**
- */
public class ShardIndexWarmerService extends AbstractIndexShardComponent {
private final CounterMetric current = new CounterMetric();
@@ -39,7 +37,7 @@ public class ShardIndexWarmerService extends AbstractIndexShardComponent {
super(shardId, indexSettings);
}
- public ESLogger logger() {
+ public Logger logger() {
return this.logger;
}
diff --git a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java
index 2e82e81959..3b4258a8bd 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java
@@ -19,6 +19,8 @@
package org.elasticsearch.indices;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -29,7 +31,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
-import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.IndexingOperationListener;
@@ -50,7 +51,8 @@ import java.util.concurrent.locks.ReentrantLock;
public class IndexingMemoryController extends AbstractComponent implements IndexingOperationListener, Closeable {
/** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%). */
- public static final Setting<ByteSizeValue> INDEX_BUFFER_SIZE_SETTING = Setting.byteSizeSetting("indices.memory.index_buffer_size", "10%", Property.NodeScope);
+ public static final Setting<ByteSizeValue> INDEX_BUFFER_SIZE_SETTING =
+ Setting.memorySizeSetting("indices.memory.index_buffer_size", "10%", Property.NodeScope);
/** Only applies when <code>indices.memory.index_buffer_size</code> is a %, to set a floor on the actual size in bytes (default: 48 MB). */
public static final Setting<ByteSizeValue> MIN_INDEX_BUFFER_SIZE_SETTING = Setting.byteSizeSetting("indices.memory.min_index_buffer_size",
@@ -103,10 +105,10 @@ public class IndexingMemoryController extends AbstractComponent implements Index
// We only apply the min/max when % value was used for the index buffer:
ByteSizeValue minIndexingBuffer = MIN_INDEX_BUFFER_SIZE_SETTING.get(this.settings);
ByteSizeValue maxIndexingBuffer = MAX_INDEX_BUFFER_SIZE_SETTING.get(this.settings);
- if (indexingBuffer.bytes() < minIndexingBuffer.bytes()) {
+ if (indexingBuffer.getBytes() < minIndexingBuffer.getBytes()) {
indexingBuffer = minIndexingBuffer;
}
- if (maxIndexingBuffer.bytes() != -1 && indexingBuffer.bytes() > maxIndexingBuffer.bytes()) {
+ if (maxIndexingBuffer.getBytes() != -1 && indexingBuffer.getBytes() > maxIndexingBuffer.getBytes()) {
indexingBuffer = maxIndexingBuffer;
}
}
@@ -177,7 +179,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index
@Override
public void onFailure(Exception e) {
- logger.warn("failed to write indexing buffer for shard [{}]; ignoring", e, shard.shardId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e);
}
});
}
@@ -242,13 +244,13 @@ public class IndexingMemoryController extends AbstractComponent implements Index
public void bytesWritten(int bytes) {
long totalBytes = bytesWrittenSinceCheck.addAndGet(bytes);
assert totalBytes >= 0;
- while (totalBytes > indexingBuffer.bytes()/30) {
+ while (totalBytes > indexingBuffer.getBytes()/30) {
if (runLock.tryLock()) {
try {
// Must pull this again because it may have changed since we first checked:
totalBytes = bytesWrittenSinceCheck.get();
- if (totalBytes > indexingBuffer.bytes()/30) {
+ if (totalBytes > indexingBuffer.getBytes()/30) {
bytesWrittenSinceCheck.addAndGet(-totalBytes);
// NOTE: this is only an approximate check, because bytes written is to the translog, vs indexing memory buffer which is
// typically smaller but can be larger in extreme cases (many unique terms). This logic is here only as a safety against
@@ -317,9 +319,9 @@ public class IndexingMemoryController extends AbstractComponent implements Index
// If we are using more than 50% of our budget across both indexing buffer and bytes we are still moving to disk, then we now
// throttle the top shards to send back-pressure to ongoing indexing:
- boolean doThrottle = (totalBytesWriting + totalBytesUsed) > 1.5 * indexingBuffer.bytes();
+ boolean doThrottle = (totalBytesWriting + totalBytesUsed) > 1.5 * indexingBuffer.getBytes();
- if (totalBytesUsed > indexingBuffer.bytes()) {
+ if (totalBytesUsed > indexingBuffer.getBytes()) {
// OK we are now over-budget; fill the priority queue and ask largest shard(s) to refresh:
PriorityQueue<ShardAndBytesUsed> queue = new PriorityQueue<>();
@@ -354,7 +356,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index
logger.debug("now write some indexing buffers: total indexing heap bytes used [{}] vs {} [{}], currently writing bytes [{}], [{}] shards with non-zero indexing buffer",
new ByteSizeValue(totalBytesUsed), INDEX_BUFFER_SIZE_SETTING.getKey(), indexingBuffer, new ByteSizeValue(totalBytesWriting), queue.size());
- while (totalBytesUsed > indexingBuffer.bytes() && queue.isEmpty() == false) {
+ while (totalBytesUsed > indexingBuffer.getBytes() && queue.isEmpty() == false) {
ShardAndBytesUsed largest = queue.poll();
logger.debug("write indexing buffer to disk for shard [{}] to free up its [{}] indexing buffer", largest.shard.shardId(), new ByteSizeValue(largest.bytesUsed));
writeIndexingBufferAsync(largest.shard);
@@ -383,8 +385,8 @@ public class IndexingMemoryController extends AbstractComponent implements Index
protected void checkIdle(IndexShard shard, long inactiveTimeNS) {
try {
shard.checkIdle(inactiveTimeNS);
- } catch (EngineClosedException | FlushNotAllowedEngineException e) {
- logger.trace("ignore exception while checking if shard {} is inactive", e, shard.shardId());
+ } catch (EngineClosedException e) {
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e);
}
}
}
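Several settings in this commit move from byteSizeSetting to the new memorySizeSetting, which interprets values such as "10%" as a fraction of the JVM's max heap. A rough sketch of that interpretation (the real parser also accepts unit suffixes like kb, mb, and gb):

```java
public class MemorySizeSettingSketch {
    // memorySizeSetting resolves percentage values against the JVM's max heap;
    // absolute values parse as a plain byte count. A rough approximation:
    static long parseBytes(String value) {
        if (value.endsWith("%")) {
            double percent = Double.parseDouble(value.substring(0, value.length() - 1));
            return (long) (Runtime.getRuntime().maxMemory() * percent / 100.0);
        }
        return Long.parseLong(value); // simplified: no kb/mb/gb suffixes here
    }

    public static void main(String[] args) {
        System.out.println("indices.memory.index_buffer_size=10% -> " + parseBytes("10%") + " bytes");
        System.out.println("indices.memory.min_index_buffer_size=50331648 -> " + parseBytes("50331648") + " bytes");
    }
}
```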
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
index d45725ff4e..eb1843dc7d 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
@@ -26,7 +26,6 @@ import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.AllFieldMapper;
@@ -37,23 +36,24 @@ import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
-import org.elasticsearch.index.mapper.IdFieldMapper;
-import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.mapper.IpFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.ObjectMapper;
+import org.elasticsearch.index.mapper.TextFieldMapper;
+import org.elasticsearch.index.mapper.TokenCountFieldMapper;
+import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
+import org.elasticsearch.index.mapper.StringFieldMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
-import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
-import org.elasticsearch.index.mapper.StringFieldMapper;
-import org.elasticsearch.index.mapper.TTLFieldMapper;
-import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.TimestampFieldMapper;
-import org.elasticsearch.index.mapper.TokenCountFieldMapper;
+import org.elasticsearch.index.mapper.TTLFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
@@ -61,8 +61,8 @@ import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.recovery.RecoverySettings;
-import org.elasticsearch.indices.recovery.RecoverySource;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import org.elasticsearch.indices.ttl.IndicesTTLService;
@@ -120,6 +120,7 @@ public class IndicesModule extends AbstractModule {
mappers.put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser());
mappers.put(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser());
mappers.put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
+ mappers.put(LatLonPointFieldMapper.CONTENT_TYPE, new LatLonPointFieldMapper.TypeParser());
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
mappers.put(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());
}
@@ -173,10 +174,9 @@ public class IndicesModule extends AbstractModule {
protected void configure() {
bindMapperExtension();
- bind(IndicesService.class).asEagerSingleton();
bind(RecoverySettings.class).asEagerSingleton();
- bind(RecoveryTargetService.class).asEagerSingleton();
- bind(RecoverySource.class).asEagerSingleton();
+ bind(PeerRecoveryTargetService.class).asEagerSingleton();
+ bind(PeerRecoverySourceService.class).asEagerSingleton();
bind(IndicesStore.class).asEagerSingleton();
bind(IndicesClusterStateService.class).asEagerSingleton();
bind(SyncedFlushService.class).asEagerSingleton();
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
index 70b9443e04..f33818ca32 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
@@ -49,13 +49,13 @@ import java.util.function.Predicate;
public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable {
- public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE_SETTING = Setting.byteSizeSetting(
- "indices.queries.cache.size", "10%", Property.NodeScope);
- public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting(
- "indices.queries.cache.count", 10000, 1, Property.NodeScope);
+ public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE_SETTING =
+ Setting.memorySizeSetting("indices.queries.cache.size", "10%", Property.NodeScope);
+ public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING =
+ Setting.intSetting("indices.queries.cache.count", 10000, 1, Property.NodeScope);
// enables caching on all segments instead of only the larger ones, for testing only
- public static final Setting<Boolean> INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING = Setting.boolSetting(
- "indices.queries.cache.all_segments", false, Property.NodeScope);
+ public static final Setting<Boolean> INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING =
+ Setting.boolSetting("indices.queries.cache.all_segments", false, Property.NodeScope);
private final LRUQueryCache cache;
private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap();
@@ -74,9 +74,9 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
logger.debug("using [node] query cache with size [{}] max filter count [{}]",
size, count);
if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) {
- cache = new ElasticsearchLRUQueryCache(count, size.bytes(), context -> true);
+ cache = new ElasticsearchLRUQueryCache(count, size.getBytes(), context -> true);
} else {
- cache = new ElasticsearchLRUQueryCache(count, size.bytes());
+ cache = new ElasticsearchLRUQueryCache(count, size.getBytes());
}
sharedRamBytesUsed = 0;
}
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java
index 5229de3f16..ff3713a374 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java
@@ -72,7 +72,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING =
Setting.boolSetting("index.requests.cache.enable", true, Property.Dynamic, Property.IndexScope);
public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE =
- Setting.byteSizeSetting("indices.requests.cache.size", "1%", Property.NodeScope);
+ Setting.memorySizeSetting("indices.requests.cache.size", "1%", Property.NodeScope);
public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE =
Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), Property.NodeScope);
@@ -86,7 +86,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
super(settings);
this.size = INDICES_CACHE_QUERY_SIZE.get(settings);
this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
- long sizeInBytes = size.bytes();
+ long sizeInBytes = size.getBytes();
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder()
.setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this);
if (expire != null) {
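The request cache budgets by weight rather than entry count: each entry is weighed by its RAM footprint, so the "1%"-of-heap limit is enforced in bytes. A small sketch assuming Elasticsearch's internal org.elasticsearch.common.cache.CacheBuilder is on the classpath, with a toy weigher in place of ramBytesUsed():

```java
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;

public class WeightedCacheSketch {
    public static void main(String[] args) {
        // Entries are weighed by their byte footprint instead of counted, so the
        // cache's budget is expressed in memory, matching the heap-relative setting.
        long maxWeightInBytes = 1024;
        Cache<String, byte[]> cache = CacheBuilder.<String, byte[]>builder()
            .setMaximumWeight(maxWeightInBytes)
            .weigher((key, value) -> key.length() + value.length)
            .build();
        cache.put("shard-0-query-hash", new byte[256]); // hypothetical key
        System.out.println("cached entries: " + cache.count());
    }
}
```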
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index 7519494c39..abc9873efa 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -20,6 +20,9 @@
package org.elasticsearch.indices;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.CollectionUtil;
@@ -36,6 +39,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
@@ -43,13 +47,11 @@ import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
@@ -97,8 +99,8 @@ import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.search.internal.SearchContext;
@@ -168,7 +170,6 @@ public class IndicesService extends AbstractLifecycleComponent
threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME, this.cacheCleaner);
}
- @Inject
public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv,
ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry,
IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
@@ -219,7 +220,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
removeIndex(index, "shutdown", false);
} catch (Exception e) {
- logger.warn("failed to remove index on stop [{}]", e, index);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to remove index on stop [{}]", index), e);
} finally {
latch.countDown();
}
@@ -297,7 +298,7 @@ public class IndicesService extends AbstractLifecycleComponent
}
} catch (IllegalIndexShardStateException e) {
// we can safely ignore illegal state on ones that are closing for example
- logger.trace("{} ignoring shard stats", e, indexShard.shardId());
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} ignoring shard stats", indexShard.shardId()), e);
}
}
}
@@ -440,16 +441,16 @@ public class IndicesService extends AbstractLifecycleComponent
}
@Override
- public IndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState, RecoveryTargetService recoveryTargetService,
- RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
- NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure) throws IOException {
+ public IndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService,
+ PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+ NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure) throws IOException {
ensureChangesAllowed();
IndexService indexService = indexService(shardRouting.index());
IndexShard indexShard = indexService.createShard(shardRouting);
indexShard.addShardFailureCallback(onShardFailure);
indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService,
(type, mapping) -> {
- assert recoveryState.getType() == RecoveryState.Type.LOCAL_SHARDS :
+ assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS:
"mapping update consumer only required by local shards recovery";
try {
nodeServicesProvider.getClient().admin().indices().preparePutMapping()
@@ -475,7 +476,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
removeIndex(index, reason, false);
} catch (Exception e) {
- logger.warn("failed to remove index ({})", e, reason);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to remove index ({})", reason), e);
}
}
@@ -566,7 +567,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
removeIndex(index, reason, true);
} catch (Exception e) {
- logger.warn("failed to delete index ({})", e, reason);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete index ({})", reason), e);
}
}
@@ -586,7 +587,7 @@ public class IndicesService extends AbstractLifecycleComponent
}
deleteIndexStore(reason, metaData, clusterState);
} catch (IOException e) {
- logger.warn("[{}] failed to delete unassigned index (reason [{}])", e, metaData.getIndex(), reason);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])", metaData.getIndex(), reason), e);
}
}
}
@@ -638,9 +639,9 @@ public class IndicesService extends AbstractLifecycleComponent
}
success = true;
} catch (LockObtainFailedException ex) {
- logger.debug("{} failed to delete index store - at least one shards is still locked", ex, index);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete index store - at least one shard is still locked", index), ex);
} catch (Exception ex) {
- logger.warn("{} failed to delete index", ex, index);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete index", index), ex);
} finally {
if (success == false) {
addPendingDelete(index, indexSettings);
@@ -747,7 +748,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
metaData = metaStateService.loadIndexState(index);
} catch (IOException e) {
- logger.warn("[{}] failed to load state file from a stale deleted index, folders will be left on disk", e, index);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to load state file from a stale deleted index, folders will be left on disk", index), e);
return null;
}
final IndexSettings indexSettings = buildIndexSettings(metaData);
@@ -756,7 +757,7 @@ public class IndicesService extends AbstractLifecycleComponent
} catch (IOException e) {
// we just warn about the exception here because if deleteIndexStoreIfDeletionAllowed
// throws an exception, it gets added to the list of pending deletes to be tried again
- logger.warn("[{}] failed to delete index on disk", e, metaData.getIndex());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete index on disk", metaData.getIndex()), e);
}
return metaData;
}
@@ -928,7 +929,7 @@ public class IndicesService extends AbstractLifecycleComponent
nodeEnv.deleteIndexDirectoryUnderLock(index, indexSettings);
iterator.remove();
} catch (IOException ex) {
- logger.debug("{} retry pending delete", ex, index);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", index), ex);
}
} else {
assert delete.shardId != -1;
@@ -938,7 +939,7 @@ public class IndicesService extends AbstractLifecycleComponent
deleteShardStore("pending delete", shardLock, delete.settings);
iterator.remove();
} catch (IOException ex) {
- logger.debug("{} retry pending delete", ex, shardLock.getShardId());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", shardLock.getShardId()), ex);
}
} else {
logger.warn("{} no shard lock for pending delete", delete.shardId);
@@ -1001,13 +1002,13 @@ public class IndicesService extends AbstractLifecycleComponent
private static final class CacheCleaner implements Runnable, Releasable {
private final IndicesFieldDataCache cache;
- private final ESLogger logger;
+ private final Logger logger;
private final ThreadPool threadPool;
private final TimeValue interval;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final IndicesRequestCache requestCache;
- public CacheCleaner(IndicesFieldDataCache cache, IndicesRequestCache requestCache, ESLogger logger, ThreadPool threadPool, TimeValue interval) {
+ public CacheCleaner(IndicesFieldDataCache cache, IndicesRequestCache requestCache, Logger logger, ThreadPool threadPool, TimeValue interval) {
this.cache = cache;
this.requestCache = requestCache;
this.logger = logger;
diff --git a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java
index f205cdd856..6c251d3bf1 100644
--- a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java
+++ b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java
@@ -35,12 +35,12 @@ import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
-import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;
@@ -154,7 +154,7 @@ public class NodeIndicesStats implements Streamable, ToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
- stats = CommonStats.readCommonStats(in);
+ stats = new CommonStats(in);
if (in.readBoolean()) {
int entries = in.readVInt();
statsByShard = new HashMap<>();
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
index 52647c8b8f..5dd0203d61 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
@@ -90,6 +90,7 @@ import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider;
import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
+import org.elasticsearch.index.analysis.MinHashTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
@@ -214,6 +215,7 @@ public final class AnalysisModule {
tokenFilters.register("edgeNGram", EdgeNGramTokenFilterFactory::new);
tokenFilters.register("edge_ngram", EdgeNGramTokenFilterFactory::new);
tokenFilters.register("shingle", ShingleTokenFilterFactory::new);
+ tokenFilters.register("min_hash", MinHashTokenFilterFactory::new);
tokenFilters.register("unique", UniqueTokenFilterFactory::new);
tokenFilters.register("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new));
tokenFilters.register("trim", TrimTokenFilterFactory::new);
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
index f3812f6900..a324e8282a 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.indices.analysis;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
@@ -138,7 +140,9 @@ public class HunspellService extends AbstractComponent {
} catch (Exception e) {
// The cache loader throws unchecked exception (see #loadDictionary()),
// here we simply report the exception and continue loading the dictionaries
- logger.error("exception while loading dictionary {}", e, file.getFileName());
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "exception while loading dictionary {}", file.getFileName()), e);
}
}
}
@@ -196,7 +200,7 @@ public class HunspellService extends AbstractComponent {
}
} catch (Exception e) {
- logger.error("Could not load hunspell dictionary [{}]", e, locale);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e);
throw e;
} finally {
IOUtils.close(affixStream);
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java
index ca95461992..23ef9bdcd3 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.analysis;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
@@ -58,7 +59,6 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.sv.SwedishAnalyzer;
import org.apache.lucene.analysis.th.ThaiAnalyzer;
import org.apache.lucene.analysis.tr.TurkishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.Version;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.index.analysis.PatternAnalyzer;
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
index 6b69658b34..a31f60fc5b 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
@@ -18,6 +18,9 @@
*/
package org.elasticsearch.indices.analysis;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
import org.apache.lucene.analysis.ar.ArabicStemFilter;
@@ -27,9 +30,7 @@ import org.apache.lucene.analysis.cjk.CJKWidthFilter;
import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
import org.apache.lucene.analysis.core.DecimalDigitFilter;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.core.UpperCaseFilter;
import org.apache.lucene.analysis.cz.CzechStemFilter;
import org.apache.lucene.analysis.de.GermanNormalizationFilter;
@@ -60,7 +61,6 @@ import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.ClassicFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.tr.ApostropheFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ElisionFilter;
import org.elasticsearch.Version;
import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory;
diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/AllCircuitBreakerStats.java b/core/src/main/java/org/elasticsearch/indices/breaker/AllCircuitBreakerStats.java
index 09f0cd9900..3022d885e1 100644
--- a/core/src/main/java/org/elasticsearch/indices/breaker/AllCircuitBreakerStats.java
+++ b/core/src/main/java/org/elasticsearch/indices/breaker/AllCircuitBreakerStats.java
@@ -21,7 +21,7 @@ package org.elasticsearch.indices.breaker;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -30,16 +30,21 @@ import java.io.IOException;
/**
* Stats class encapsulating all of the different circuit breaker stats
*/
-public class AllCircuitBreakerStats implements Streamable, ToXContent {
+public class AllCircuitBreakerStats implements Writeable, ToXContent {
- private CircuitBreakerStats[] allStats = new CircuitBreakerStats[0];
+ private final CircuitBreakerStats[] allStats;
- public AllCircuitBreakerStats() {
+ public AllCircuitBreakerStats(CircuitBreakerStats[] allStats) {
+ this.allStats = allStats;
+ }
+ public AllCircuitBreakerStats(StreamInput in) throws IOException {
+ allStats = in.readArray(CircuitBreakerStats::new, CircuitBreakerStats[]::new);
}
- public AllCircuitBreakerStats(CircuitBreakerStats[] allStats) {
- this.allStats = allStats;
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeArray(allStats);
}
public CircuitBreakerStats[] getAllStats() {
@@ -55,33 +60,6 @@ public class AllCircuitBreakerStats implements Streamable, ToXContent {
return null;
}
- public static AllCircuitBreakerStats readOptionalAllCircuitBreakerStats(StreamInput in) throws IOException {
- AllCircuitBreakerStats stats = in.readOptionalStreamable(AllCircuitBreakerStats::new);
- return stats;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- int statCount = in.readVInt();
- CircuitBreakerStats[] newStats = new CircuitBreakerStats[statCount];
- for (int i = 0; i < statCount; i++) {
- CircuitBreakerStats stats = new CircuitBreakerStats();
- stats.readFrom(in);
- newStats[i] = stats;
- }
- allStats = newStats;
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(allStats.length);
- for (CircuitBreakerStats stats : allStats) {
- if (stats != null) {
- stats.writeTo(out);
- }
- }
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.BREAKERS);
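
The two stats classes in this area follow the same migration from Streamable to Writeable: mutable fields become final, deserialization moves from readFrom() into a StreamInput constructor, and array handling collapses to readArray/writeArray. A minimal sketch of the pattern, assuming the StreamInput/StreamOutput API used in this diff (ExampleStats itself is hypothetical):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    public class ExampleStats implements Writeable {
        private final String name;
        private final long count;

        public ExampleStats(String name, long count) {
            this.name = name;
            this.count = count;
        }

        public ExampleStats(StreamInput in) throws IOException {
            name = in.readString();   // must match the first write below
            count = in.readLong();    // must match the second write below
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // The wire format is positional: write order defines read order.
            out.writeString(name);
            out.writeLong(count);
        }
    }
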
diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerStats.java b/core/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerStats.java
index eca235711b..a079e29edf 100644
--- a/core/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerStats.java
+++ b/core/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerStats.java
@@ -21,7 +21,7 @@ package org.elasticsearch.indices.breaker;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -32,17 +32,13 @@ import java.util.Locale;
/**
* Class encapsulating stats about the circuit breaker
*/
-public class CircuitBreakerStats implements Streamable, ToXContent {
+public class CircuitBreakerStats implements Writeable, ToXContent {
- private String name;
- private long limit;
- private long estimated;
- private long trippedCount;
- private double overhead;
-
- CircuitBreakerStats() {
-
- }
+ private final String name;
+ private final long limit;
+ private final long estimated;
+ private final long trippedCount;
+ private final double overhead;
public CircuitBreakerStats(String name, long limit, long estimated, double overhead, long trippedCount) {
this.name = name;
@@ -52,6 +48,23 @@ public class CircuitBreakerStats implements Streamable, ToXContent {
this.overhead = overhead;
}
+ public CircuitBreakerStats(StreamInput in) throws IOException {
+ limit = in.readLong();
+ estimated = in.readLong();
+ overhead = in.readDouble();
+ this.trippedCount = in.readLong();
+ this.name = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(limit);
+ out.writeLong(estimated);
+ out.writeDouble(overhead);
+ out.writeLong(trippedCount);
+ out.writeString(name);
+ }
+
public String getName() {
return this.name;
}
@@ -72,30 +85,6 @@ public class CircuitBreakerStats implements Streamable, ToXContent {
return this.overhead;
}
- public static CircuitBreakerStats readOptionalCircuitBreakerStats(StreamInput in) throws IOException {
- CircuitBreakerStats stats = in.readOptionalStreamable(CircuitBreakerStats::new);
- return stats;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- // limit is the maximum from the old circuit breaker stats for backwards compatibility
- limit = in.readLong();
- estimated = in.readLong();
- overhead = in.readDouble();
- this.trippedCount = in.readLong();
- this.name = in.readString();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeLong(limit);
- out.writeLong(estimated);
- out.writeDouble(overhead);
- out.writeLong(trippedCount);
- out.writeString(name);
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name.toLowerCase(Locale.ROOT));
diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
index 6557148209..de31a57283 100644
--- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
+++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
@@ -47,24 +47,24 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
private final ConcurrentMap<String, CircuitBreaker> breakers = new ConcurrentHashMap<>();
public static final Setting<ByteSizeValue> TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING =
- Setting.byteSizeSetting("indices.breaker.total.limit", "70%", Property.Dynamic, Property.NodeScope);
+ Setting.memorySizeSetting("indices.breaker.total.limit", "70%", Property.Dynamic, Property.NodeScope);
public static final Setting<ByteSizeValue> FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING =
- Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", Property.Dynamic, Property.NodeScope);
+ Setting.memorySizeSetting("indices.breaker.fielddata.limit", "60%", Property.Dynamic, Property.NodeScope);
public static final Setting<Double> FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING =
Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, Property.Dynamic, Property.NodeScope);
public static final Setting<CircuitBreaker.Type> FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING =
new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope);
public static final Setting<ByteSizeValue> REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING =
- Setting.byteSizeSetting("indices.breaker.request.limit", "60%", Property.Dynamic, Property.NodeScope);
+ Setting.memorySizeSetting("indices.breaker.request.limit", "60%", Property.Dynamic, Property.NodeScope);
public static final Setting<Double> REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING =
Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, Property.Dynamic, Property.NodeScope);
public static final Setting<CircuitBreaker.Type> REQUEST_CIRCUIT_BREAKER_TYPE_SETTING =
new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope);
public static final Setting<ByteSizeValue> IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING =
- Setting.byteSizeSetting("network.breaker.inflight_requests.limit", "100%", Property.Dynamic, Property.NodeScope);
+ Setting.memorySizeSetting("network.breaker.inflight_requests.limit", "100%", Property.Dynamic, Property.NodeScope);
public static final Setting<Double> IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING =
Setting.doubleSetting("network.breaker.inflight_requests.overhead", 1.0d, 0.0d, Property.Dynamic, Property.NodeScope);
public static final Setting<CircuitBreaker.Type> IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_TYPE_SETTING =
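
The switch from Setting.byteSizeSetting to Setting.memorySizeSetting matters because these breaker limits default to percentages: a memory-size setting resolves a value like "70%" against the maximum JVM heap rather than requiring an absolute byte count. A rough, hand-rolled illustration of that percentage semantics (parsePercentOrBytes is hypothetical; the real parsing lives in Elasticsearch's settings infrastructure):

    public final class HeapRatioSketch {
        static long parsePercentOrBytes(String value) {
            if (value.endsWith("%")) {
                double percent = Double.parseDouble(value.substring(0, value.length() - 1));
                // Resolve the percentage against the maximum heap size.
                return (long) ((percent / 100.0) * Runtime.getRuntime().maxMemory());
            }
            // Raw byte count; the real parser also accepts kb/mb/gb suffixes.
            return Long.parseLong(value);
        }

        public static void main(String[] args) {
            // e.g. with a 1gb heap, "70%" resolves to roughly 751619276 bytes
            System.out.println(parsePercentOrBytes("70%"));
        }
    }
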
@@ -81,25 +81,25 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
public HierarchyCircuitBreakerService(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA,
- FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(),
+ FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
);
this.inFlightRequestsSettings = new BreakerSettings(CircuitBreaker.IN_FLIGHT_REQUESTS,
- IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(),
+ IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
);
this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST,
- REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(),
+ REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
);
this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT,
- TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), 1.0,
+ TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), 1.0,
CircuitBreaker.Type.PARENT);
if (logger.isTraceEnabled()) {
@@ -117,7 +117,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
}
private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) {
- BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.bytes(), newRequestOverhead,
+ BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.getBytes(), newRequestOverhead,
HierarchyCircuitBreakerService.this.requestSettings.getType());
registerBreaker(newRequestSettings);
HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings;
@@ -125,7 +125,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
}
private void setInFlightRequestsBreakerLimit(ByteSizeValue newInFlightRequestsMax, Double newInFlightRequestsOverhead) {
- BreakerSettings newInFlightRequestsSettings = new BreakerSettings(CircuitBreaker.IN_FLIGHT_REQUESTS, newInFlightRequestsMax.bytes(),
+ BreakerSettings newInFlightRequestsSettings = new BreakerSettings(CircuitBreaker.IN_FLIGHT_REQUESTS, newInFlightRequestsMax.getBytes(),
newInFlightRequestsOverhead, HierarchyCircuitBreakerService.this.inFlightRequestsSettings.getType());
registerBreaker(newInFlightRequestsSettings);
HierarchyCircuitBreakerService.this.inFlightRequestsSettings = newInFlightRequestsSettings;
@@ -133,7 +133,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
}
private void setFieldDataBreakerLimit(ByteSizeValue newFielddataMax, Double newFielddataOverhead) {
- long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes();
+ long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.getBytes();
newFielddataOverhead = newFielddataOverhead == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead;
BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead,
HierarchyCircuitBreakerService.this.fielddataSettings.getType());
@@ -143,13 +143,13 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
}
private boolean validateTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) {
- BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT);
+ BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.getBytes(), 1.0, CircuitBreaker.Type.PARENT);
validateSettings(new BreakerSettings[]{newParentSettings});
return true;
}
private void setTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) {
- BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT);
+ BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.getBytes(), 1.0, CircuitBreaker.Type.PARENT);
this.parentSettings = newParentSettings;
}
diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index fd77722f86..920de8ae46 100644
--- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -19,6 +19,9 @@
package org.elasticsearch.indices.cluster;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.LockObtainFailedException;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
@@ -28,6 +31,8 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.Type;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -35,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@@ -59,10 +63,10 @@ import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.flush.SyncedFlushService;
+import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
-import org.elasticsearch.indices.recovery.RecoverySource;
import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.snapshots.RestoreService;
@@ -85,7 +89,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
final AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService;
private final ClusterService clusterService;
private final ThreadPool threadPool;
- private final RecoveryTargetService recoveryTargetService;
+ private final PeerRecoveryTargetService recoveryTargetService;
private final ShardStateAction shardStateAction;
private final NodeMappingRefreshAction nodeMappingRefreshAction;
private final NodeServicesProvider nodeServicesProvider;
@@ -106,15 +110,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
@Inject
public IndicesClusterStateService(Settings settings, IndicesService indicesService, ClusterService clusterService,
- ThreadPool threadPool, RecoveryTargetService recoveryTargetService,
+ ThreadPool threadPool, PeerRecoveryTargetService recoveryTargetService,
ShardStateAction shardStateAction,
NodeMappingRefreshAction nodeMappingRefreshAction,
RepositoriesService repositoriesService, RestoreService restoreService,
SearchService searchService, SyncedFlushService syncedFlushService,
- RecoverySource recoverySource, NodeServicesProvider nodeServicesProvider) {
+ PeerRecoverySourceService peerRecoverySourceService, NodeServicesProvider nodeServicesProvider) {
this(settings, (AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>>) indicesService,
clusterService, threadPool, recoveryTargetService, shardStateAction,
- nodeMappingRefreshAction, repositoriesService, restoreService, searchService, syncedFlushService, recoverySource,
+ nodeMappingRefreshAction, repositoriesService, restoreService, searchService, syncedFlushService, peerRecoverySourceService,
nodeServicesProvider);
}
@@ -122,14 +126,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
IndicesClusterStateService(Settings settings,
AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService,
ClusterService clusterService,
- ThreadPool threadPool, RecoveryTargetService recoveryTargetService,
+ ThreadPool threadPool, PeerRecoveryTargetService recoveryTargetService,
ShardStateAction shardStateAction,
NodeMappingRefreshAction nodeMappingRefreshAction,
RepositoriesService repositoriesService, RestoreService restoreService,
SearchService searchService, SyncedFlushService syncedFlushService,
- RecoverySource recoverySource, NodeServicesProvider nodeServicesProvider) {
+ PeerRecoverySourceService peerRecoverySourceService, NodeServicesProvider nodeServicesProvider) {
super(settings);
- this.buildInIndexListener = Arrays.asList(recoverySource, recoveryTargetService, searchService, syncedFlushService);
+ this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, syncedFlushService);
this.indicesService = indicesService;
this.clusterService = clusterService;
this.threadPool = threadPool;
@@ -182,7 +186,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
failMissingShards(state);
- removeShards(state);
+ removeShards(state); // removes any local shards that don't match what the master expects
updateIndices(event); // can also fail shards, but these are then guaranteed to be in failedShardsCache
@@ -267,7 +271,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
threadPool.generic().execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
- logger.warn("[{}] failed to complete pending deletion for index", e, index);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
}
@Override
@@ -365,14 +370,24 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
ShardRouting currentRoutingEntry = shard.routingEntry();
ShardId shardId = currentRoutingEntry.shardId();
ShardRouting newShardRouting = localRoutingNode == null ? null : localRoutingNode.getByShardId(shardId);
- if (newShardRouting == null || newShardRouting.isSameAllocation(currentRoutingEntry) == false) {
+ if (newShardRouting == null) {
// we can just remove the shard without cleaning it locally, since we will clean it in IndicesStore
// once all shards are allocated
logger.debug("{} removing shard (not allocated)", shardId);
indexService.removeShard(shardId.id(), "removing shard (not allocated)");
+ } else if (newShardRouting.isSameAllocation(currentRoutingEntry) == false) {
+ logger.debug("{} removing shard (stale allocation id, stale {}, new {})", shardId,
+ currentRoutingEntry, newShardRouting);
+ indexService.removeShard(shardId.id(), "removing shard (stale copy)");
+ } else if (newShardRouting.initializing() && currentRoutingEntry.active()) {
+ // this can happen if the node was isolated/gc-ed, then rejoins the cluster and a new shard with the same allocation id
+ // is assigned to it. Batched cluster state processing, or shard fetching completing before the node receives a new cluster
+ // state, may result in a new shard being initialized while it has the same allocation id as the currently started shard.
+ logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
+ indexService.removeShard(shardId.id(), "removing shard (stale copy)");
} else {
// remove shards where recovery source has changed. This re-initializes shards later in createOrUpdateShards
- if (newShardRouting.isPeerRecovery()) {
+ if (newShardRouting.recoverySource() != null && newShardRouting.recoverySource().getType() == Type.PEER) {
RecoveryState recoveryState = shard.recoveryState();
final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, newShardRouting);
if (recoveryState.getSourceNode().equals(sourceNode) == false) {
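
The rewritten removeShards branch above distinguishes three removal cases instead of collapsing the first two. As a condensed restatement of that decision (ShardView is a hypothetical stand-in for ShardRouting so the sketch compiles on its own; the branches mirror the diff):

    public class RemoveShardsSketch {
        interface ShardView {
            boolean isSameAllocation(ShardView other);
            boolean initializing();
            boolean active();
        }

        enum Removal { NOT_ALLOCATED, STALE_ALLOCATION_ID, STALE_ACTIVE_COPY, KEEP }

        static Removal decide(ShardView current, ShardView target) {
            if (target == null) {
                return Removal.NOT_ALLOCATED;       // master no longer assigns this shard here
            } else if (target.isSameAllocation(current) == false) {
                return Removal.STALE_ALLOCATION_ID; // a different copy now owns the allocation
            } else if (target.initializing() && current.active()) {
                return Removal.STALE_ACTIVE_COPY;   // node rejoined; master expects a fresh copy
            }
            return Removal.KEEP;
        }
    }
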
@@ -488,7 +503,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
Shard shard = indexService.getShardOrNull(shardId.id());
if (shard == null) {
assert shardRouting.initializing() : shardRouting + " should have been removed by failMissingShards";
- createShard(nodes, routingTable, shardRouting, indexService);
+ createShard(nodes, routingTable, shardRouting);
} else {
updateShard(nodes, shardRouting, shard);
}
@@ -496,12 +511,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
}
}
- private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardRouting shardRouting,
- AllocatedIndex<? extends Shard> indexService) {
+ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardRouting shardRouting) {
assert shardRouting.initializing() : "only allow shard creation for initializing shard but was " + shardRouting;
DiscoveryNode sourceNode = null;
- if (shardRouting.isPeerRecovery()) {
+ if (shardRouting.recoverySource().getType() == Type.PEER) {
sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, shardRouting);
if (sourceNode == null) {
logger.trace("ignoring initializing shard {} - no source node can be found.", shardRouting.shardId());
@@ -511,8 +525,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
try {
logger.debug("{} creating shard", shardRouting.shardId());
- RecoveryState recoveryState = recoveryState(nodes.getLocalNode(), sourceNode, shardRouting,
- indexService.getIndexSettings().getIndexMetaData());
+ RecoveryState recoveryState = new RecoveryState(shardRouting, nodes.getLocalNode(), sourceNode);
indicesService.createShard(shardRouting, recoveryState, recoveryTargetService, new RecoveryListener(shardRouting),
repositoriesService, nodeServicesProvider, failedShardHandler);
} catch (IndexShardAlreadyExistsException e) {
@@ -556,10 +569,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
/**
* Finds the routing source node for peer recovery; returns null if it is not found. Note, this method expects the shard
- * routing to *require* peer recovery, use {@link ShardRouting#isPeerRecovery()} to
+ * routing to *require* peer recovery, use {@link ShardRouting#recoverySource()} to
* check if it is needed or not.
*/
- private static DiscoveryNode findSourceNodeForPeerRecovery(ESLogger logger, RoutingTable routingTable, DiscoveryNodes nodes,
+ private static DiscoveryNode findSourceNodeForPeerRecovery(Logger logger, RoutingTable routingTable, DiscoveryNodes nodes,
ShardRouting shardRouting) {
DiscoveryNode sourceNode = null;
if (!shardRouting.primary()) {
@@ -586,7 +599,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
return sourceNode;
}
- private class RecoveryListener implements RecoveryTargetService.RecoveryListener {
+ private class RecoveryListener implements PeerRecoveryTargetService.RecoveryListener {
private final ShardRouting shardRouting;
@@ -596,29 +609,20 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
@Override
public void onRecoveryDone(RecoveryState state) {
- if (state.getType() == RecoveryState.Type.SNAPSHOT) {
- restoreService.indexShardRestoreCompleted(state.getRestoreSource().snapshot(), shardRouting.shardId());
- }
- shardStateAction.shardStarted(shardRouting, message(state), SHARD_STATE_ACTION_LISTENER);
- }
-
- private String message(RecoveryState state) {
- switch (state.getType()) {
- case SNAPSHOT: return "after recovery from repository";
- case STORE: return "after recovery from store";
- case PRIMARY_RELOCATION: return "after recovery (primary relocation) from node [" + state.getSourceNode() + "]";
- case REPLICA: return "after recovery (replica) from node [" + state.getSourceNode() + "]";
- case LOCAL_SHARDS: return "after recovery from local shards";
- default: throw new IllegalArgumentException("Unknown recovery type: " + state.getType().name());
+ if (state.getRecoverySource().getType() == Type.SNAPSHOT) {
+ SnapshotRecoverySource snapshotRecoverySource = (SnapshotRecoverySource) state.getRecoverySource();
+ restoreService.indexShardRestoreCompleted(snapshotRecoverySource.snapshot(), shardRouting.shardId());
}
+ shardStateAction.shardStarted(shardRouting, "after " + state.getRecoverySource(), SHARD_STATE_ACTION_LISTENER);
}
@Override
public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
- if (state.getType() == RecoveryState.Type.SNAPSHOT) {
+ if (state.getRecoverySource().getType() == Type.SNAPSHOT) {
try {
if (Lucene.isCorruptionException(e.getCause())) {
- restoreService.failRestore(state.getRestoreSource().snapshot(), shardRouting.shardId());
+ SnapshotRecoverySource snapshotRecoverySource = (SnapshotRecoverySource) state.getRecoverySource();
+ restoreService.failRestore(snapshotRecoverySource.snapshot(), shardRouting.shardId());
}
} catch (Exception inner) {
e.addSuppressed(inner);
@@ -631,27 +635,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
}
}
- private RecoveryState recoveryState(DiscoveryNode localNode, DiscoveryNode sourceNode, ShardRouting shardRouting,
- IndexMetaData indexMetaData) {
- assert shardRouting.initializing() : "only allow initializing shard routing to be recovered: " + shardRouting;
- if (shardRouting.isPeerRecovery()) {
- assert sourceNode != null : "peer recovery started but sourceNode is null for " + shardRouting;
- RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA;
- return new RecoveryState(shardRouting.shardId(), shardRouting.primary(), type, sourceNode, localNode);
- } else if (shardRouting.restoreSource() == null) {
- // recover from filesystem store
- Index mergeSourceIndex = indexMetaData.getMergeSourceIndex();
- final boolean recoverFromLocalShards = mergeSourceIndex != null && shardRouting.allocatedPostIndexCreate(indexMetaData) == false
- && shardRouting.primary();
- return new RecoveryState(shardRouting.shardId(), shardRouting.primary(),
- recoverFromLocalShards ? RecoveryState.Type.LOCAL_SHARDS : RecoveryState.Type.STORE, localNode, localNode);
- } else {
- // recover from a snapshot
- return new RecoveryState(shardRouting.shardId(), shardRouting.primary(),
- RecoveryState.Type.SNAPSHOT, shardRouting.restoreSource(), localNode);
- }
- }
-
private synchronized void handleRecoveryFailure(ShardRouting shardRouting, boolean sendShardFailure, Exception failure) {
failAndRemoveShard(shardRouting, sendShardFailure, "failed recovery", failure);
}
@@ -667,11 +650,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
} catch (Exception inner) {
inner.addSuppressed(failure);
logger.warn(
- "[{}][{}] failed to remove shard after failure ([{}])",
- inner,
- shardRouting.getIndexName(),
- shardRouting.getId(),
- message);
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}][{}] failed to remove shard after failure ([{}])",
+ shardRouting.getIndexName(),
+ shardRouting.getId(),
+ message),
+ inner);
}
if (sendShardFailure) {
sendFailShard(shardRouting, message, failure);
@@ -680,17 +664,20 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure) {
try {
- logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure);
failedShardsCache.put(shardRouting.shardId(), shardRouting);
shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);
} catch (Exception inner) {
if (failure != null) inner.addSuppressed(failure);
logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
"[{}][{}] failed to mark shard as failed (because of [{}])",
- inner,
shardRouting.getIndexName(),
shardRouting.getId(),
- message);
+ message),
+ inner);
}
}
@@ -821,8 +808,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
/**
* Creates shard for the specified shard routing and starts recovery.
*/
- T createShard(ShardRouting shardRouting, RecoveryState recoveryState, RecoveryTargetService recoveryTargetService,
- RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+ T createShard(ShardRouting shardRouting, RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService,
+ PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure) throws IOException;
/**
diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
index 3ab18dd1bd..860bdeb8ff 100644
--- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
+++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.fielddata.cache;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -31,7 +32,6 @@ import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -51,14 +51,14 @@ import java.util.function.ToLongBiFunction;
public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable>, Releasable{
public static final Setting<ByteSizeValue> INDICES_FIELDDATA_CACHE_SIZE_KEY =
- Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), Property.NodeScope);
+ Setting.memorySizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), Property.NodeScope);
private final IndexFieldDataCache.Listener indicesFieldDataCacheListener;
private final Cache<Key, Accountable> cache;
public IndicesFieldDataCache(Settings settings, IndexFieldDataCache.Listener indicesFieldDataCacheListener) {
super(settings);
this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
- final long sizeInBytes = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).bytes();
+ final long sizeInBytes = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).getBytes();
CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.<Key, Accountable>builder()
.removalListener(this);
if (sizeInBytes > 0) {
@@ -108,13 +108,13 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
* A specific cache instance for the relevant parameters of it (index, fieldNames, fieldType).
*/
static class IndexFieldCache implements IndexFieldDataCache, SegmentReader.CoreClosedListener, IndexReader.ReaderClosedListener {
- private final ESLogger logger;
+ private final Logger logger;
final Index index;
final String fieldName;
private final Cache<Key, Accountable> cache;
private final Listener[] listeners;
- IndexFieldCache(ESLogger logger,final Cache<Key, Accountable> cache, Index index, String fieldName, Listener... listeners) {
+ IndexFieldCache(Logger logger, final Cache<Key, Accountable> cache, Index index, String fieldName, Listener... listeners) {
this.logger = logger;
this.listeners = listeners;
this.index = index;
diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
index 236b376871..273682db31 100644
--- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
+++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.indices.flush;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
@@ -50,12 +52,12 @@ import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -100,7 +102,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public void onFailure(Exception e) {
- logger.debug("{} sync flush on inactive shard failed", e, indexShard.shardId());
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
}
});
}
@@ -335,7 +337,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public void handleException(TransportException exp) {
- logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
}
@@ -391,7 +393,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public void handleException(TransportException exp) {
- logger.trace("{} error while performing pre synced flush on [{}], skipping", exp, shardId, shard);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
if (countDown.countDown()) {
listener.onResponse(commitIds);
}
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java
index c17b7e2a25..3fd12a6993 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java
@@ -51,7 +51,7 @@ import java.util.function.Supplier;
* The source recovery accepts recovery requests from other peer shards and start the recovery process from this
* source shard to the target shard.
*/
-public class RecoverySource extends AbstractComponent implements IndexEventListener{
+public class PeerRecoverySourceService extends AbstractComponent implements IndexEventListener {
public static class Actions {
public static final String START_RECOVERY = "internal:index/shard/recovery/start_recovery";
@@ -66,8 +66,8 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
private final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries();
@Inject
- public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,
- RecoverySettings recoverySettings, ClusterService clusterService) {
+ public PeerRecoverySourceService(Settings settings, TransportService transportService, IndicesService indicesService,
+ RecoverySettings recoverySettings, ClusterService clusterService) {
super(settings);
this.transportService = transportService;
this.indicesService = indicesService;
@@ -97,8 +97,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
}
ShardRouting routingEntry = shard.routingEntry();
- if (request.recoveryType() == RecoveryState.Type.PRIMARY_RELOCATION &&
- (routingEntry.relocating() == false || routingEntry.relocatingNodeId().equals(request.targetNode().getId()) == false)) {
+ if (request.isPrimaryRelocation() && (routingEntry.relocating() == false || routingEntry.relocatingNodeId().equals(request.targetNode().getId()) == false)) {
logger.debug("delaying recovery of {} as source shard is not marked yet as relocating to {}", request.shardId(), request.targetNode());
throw new DelayRecoveryException("source shard is not marked yet as relocating to [" + request.targetNode() + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
index b6fed0e9a4..f26d0787f4 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
@@ -19,6 +19,8 @@
package org.elasticsearch.indices.recovery;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RateLimiter;
import org.elasticsearch.ElasticsearchException;
@@ -68,7 +70,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
* Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and
* not several of them (since we don't allocate several shard replicas to the same node).
*/
-public class RecoveryTargetService extends AbstractComponent implements IndexEventListener {
+public class PeerRecoveryTargetService extends AbstractComponent implements IndexEventListener {
public static class Actions {
public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo";
@@ -90,7 +92,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
private final RecoveriesCollection onGoingRecoveries;
@Inject
- public RecoveryTargetService(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings
+ public PeerRecoveryTargetService(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings
recoverySettings, ClusterService clusterService) {
super(settings);
this.threadPool = threadPool;
@@ -133,8 +135,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason);
}
- public void startRecovery(final IndexShard indexShard, final RecoveryState.Type recoveryType, final DiscoveryNode sourceNode, final
- RecoveryListener listener) {
+ public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) {
// create a new recovery status, and process...
final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
threadPool.generic().execute(new RecoveryRunner(recoveryId));
@@ -142,7 +143,9 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
protected void retryRecovery(final RecoveryTarget recoveryTarget, final Throwable reason, TimeValue retryAfter, final
StartRecoveryRequest currentRequest) {
- logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryTarget.recoveryId(), retryAfter);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "will retry recovery with id [{}] in [{}]", recoveryTarget.recoveryId(), retryAfter), reason);
retryRecovery(recoveryTarget, retryAfter, currentRequest);
}
@@ -189,8 +192,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
}
logger.trace("{} local file count: [{}]", recoveryTarget, metadataSnapshot.size());
final StartRecoveryRequest request = new StartRecoveryRequest(recoveryTarget.shardId(), recoveryTarget.sourceNode(),
- clusterService.localNode(),
- metadataSnapshot, recoveryTarget.state().getType(), recoveryTarget.recoveryId());
+ clusterService.localNode(), metadataSnapshot, recoveryTarget.state().getPrimary(), recoveryTarget.recoveryId());
final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
try {
@@ -198,7 +200,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
.sourceNode());
recoveryTarget.indexShard().prepareForIndexRecovery();
recoveryTarget.CancellableThreads().execute(() -> responseHolder.set(
- transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request,
+ transportService.submitRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request,
new FutureTransportResponseHandler<RecoveryResponse>() {
@Override
public RecoveryResponse newInstance() {
@@ -235,7 +237,12 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
logger.trace("recovery cancelled", e);
} catch (Exception e) {
if (logger.isTraceEnabled()) {
- logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().getIndex().getName(), request.shardId().id());
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}][{}] Got exception on recovery",
+ request.shardId().getIndex().getName(),
+ request.shardId().id()),
+ e);
}
Throwable cause = ExceptionsHelper.unwrapCause(e);
if (cause instanceof CancellableThreads.ExecutionCancelledException) {
@@ -297,7 +304,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
)) {
- recoveryRef.status().prepareForTranslogOperations(request.totalTranslogOps());
+ recoveryRef.status().prepareForTranslogOperations(request.totalTranslogOps(), request.getMaxUnsafeAutoIdTimestamp());
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
@@ -347,8 +354,11 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
// which causes local mapping changes since the mapping (clusterstate) might not have arrived on this node.
// we want to wait until these mappings are processed but also need to do some maintenance and roll back the
// number of processed (completed) operations in this batch to ensure accounting is correct.
- logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception
- .completedOperations());
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)",
+ exception.completedOperations()),
+ exception);
final RecoveryState.Translog translog = recoveryTarget.state().getTranslog();
translog.decrementRecoveredOperations(exception.completedOperations()); // do the maintenance and roll back completed ops
// we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be
@@ -427,8 +437,12 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion,
observer.observedState().getVersion());
} catch (Exception e) {
- logger.debug("failed waiting for cluster state with version {} (current: {})", e, clusterStateVersion,
- observer.observedState());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed waiting for cluster state with version {} (current: {})",
+ clusterStateVersion,
+ observer.observedState()),
+ e);
throw ExceptionsHelper.convertToRuntime(e);
}
}
@@ -506,13 +520,17 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
public void onFailure(Exception e) {
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
if (recoveryRef != null) {
- logger.error("unexpected error during recovery [{}], failing shard", e, recoveryId);
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "unexpected error during recovery [{}], failing shard", recoveryId), e);
onGoingRecoveries.failRecovery(recoveryId,
new RecoveryFailedException(recoveryRef.status().state(), "unexpected error", e),
true // be safe
);
} else {
- logger.debug("unexpected error during recovery, but recovery id [{}] is finished", e, recoveryId);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
index 69a55e03c9..d2e07bd9e4 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
@@ -57,7 +57,7 @@ public class RecoverFilesRecoveryException extends ElasticsearchException implem
public RecoverFilesRecoveryException(StreamInput in) throws IOException{
super(in);
numberOfFiles = in.readInt();
- totalFilesSize = ByteSizeValue.readBytesSizeValue(in);
+ totalFilesSize = new ByteSizeValue(in);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java
index 0d3ee87e3b..65a48b18e2 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java
@@ -19,9 +19,11 @@
package org.elasticsearch.indices.recovery;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -46,11 +48,11 @@ public class RecoveriesCollection {
/** This is the single source of truth for ongoing recoveries. If it's not here, it was canceled or done */
private final ConcurrentMap<Long, RecoveryTarget> onGoingRecoveries = ConcurrentCollections.newConcurrentMap();
- private final ESLogger logger;
+ private final Logger logger;
private final ThreadPool threadPool;
private final Callback<Long> ensureClusterStateVersionCallback;
- public RecoveriesCollection(ESLogger logger, ThreadPool threadPool, Callback<Long> ensureClusterStateVersionCallback) {
+ public RecoveriesCollection(Logger logger, ThreadPool threadPool, Callback<Long> ensureClusterStateVersionCallback) {
this.logger = logger;
this.threadPool = threadPool;
this.ensureClusterStateVersionCallback = ensureClusterStateVersionCallback;
@@ -62,7 +64,7 @@ public class RecoveriesCollection {
* @return the id of the new recovery.
*/
public long startRecovery(IndexShard indexShard, DiscoveryNode sourceNode,
- RecoveryTargetService.RecoveryListener listener, TimeValue activityTimeout) {
+ PeerRecoveryTargetService.RecoveryListener listener, TimeValue activityTimeout) {
RecoveryTarget status = new RecoveryTarget(indexShard, sourceNode, listener, ensureClusterStateVersionCallback);
RecoveryTarget existingStatus = onGoingRecoveries.putIfAbsent(status.recoveryId(), status);
assert existingStatus == null : "found two RecoveryStatus instances with the same id";
@@ -222,7 +224,7 @@ public class RecoveriesCollection {
@Override
public void onFailure(Exception e) {
- logger.error("unexpected error while monitoring recovery [{}]", e, recoveryId);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
index 3d5d7052c9..171102d07e 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.recovery;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
@@ -31,6 +32,7 @@ import java.io.IOException;
*/
public class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
+ private long maxUnsafeAutoIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
private long recoveryId;
private ShardId shardId;
private int totalTranslogOps = RecoveryState.Translog.UNKNOWN;
@@ -38,10 +40,11 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
public RecoveryPrepareForTranslogOperationsRequest() {
}
- RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps) {
+ RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, long maxUnsafeAutoIdTimestamp) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.totalTranslogOps = totalTranslogOps;
+ this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp;
}
public long recoveryId() {
@@ -56,12 +59,17 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
return totalTranslogOps;
}
+ public long getMaxUnsafeAutoIdTimestamp() {
+ return maxUnsafeAutoIdTimestamp;
+ }
+
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
recoveryId = in.readLong();
shardId = ShardId.readShardId(in);
totalTranslogOps = in.readVInt();
+ maxUnsafeAutoIdTimestamp = in.readLong();
}
@Override
@@ -70,5 +78,6 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
out.writeLong(recoveryId);
shardId.writeTo(out);
out.writeVInt(totalTranslogOps);
+ out.writeLong(maxUnsafeAutoIdTimestamp);
}
}
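
The hunks above append maxUnsafeAutoIdTimestamp to the end of the request's wire format; readFrom must consume fields in exactly the order writeTo produces them. A minimal sketch of that symmetric, order-sensitive pattern, using plain java.io streams as stand-ins for Elasticsearch's StreamInput/StreamOutput (field names illustrative):

    import java.io.*;

    public class WireFormatSketch {
        long recoveryId;
        int totalTranslogOps;
        long maxUnsafeAutoIdTimestamp; // the newly appended field stays last

        void writeTo(DataOutput out) throws IOException {
            out.writeLong(recoveryId);
            out.writeInt(totalTranslogOps);
            out.writeLong(maxUnsafeAutoIdTimestamp); // appended at the end, mirroring the diff
        }

        void readFrom(DataInput in) throws IOException {
            recoveryId = in.readLong();          // fields are read in exactly
            totalTranslogOps = in.readInt();     // the order they were written
            maxUnsafeAutoIdTimestamp = in.readLong();
        }

        public static void main(String[] args) throws IOException {
            WireFormatSketch original = new WireFormatSketch();
            original.recoveryId = 7L;
            original.totalTranslogOps = 3;
            original.maxUnsafeAutoIdTimestamp = -1L;

            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            original.writeTo(new DataOutputStream(bytes));

            WireFormatSketch copy = new WireFormatSketch();
            copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println(copy.maxUnsafeAutoIdTimestamp); // -1
        }
    }
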
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
index 8259545847..6c4e484a2d 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
@@ -61,7 +61,7 @@ public class RecoverySettings extends AbstractComponent {
*/
public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING =
Setting.timeSetting("indices.recovery.internal_action_long_timeout",
- (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(),
+ (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2),
TimeValue.timeValueSeconds(0), Property.Dynamic, Property.NodeScope);
/**
@@ -70,7 +70,7 @@ public class RecoverySettings extends AbstractComponent {
*/
public static final Setting<TimeValue> INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING =
Setting.timeSetting("indices.recovery.recovery_activity_timeout",
- (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0),
+ INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING::get, TimeValue.timeValueSeconds(0),
Property.Dynamic, Property.NodeScope);
public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);
@@ -99,10 +99,10 @@ public class RecoverySettings extends AbstractComponent {
this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings);
this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings);
- if (maxBytesPerSec.bytes() <= 0) {
+ if (maxBytesPerSec.getBytes() <= 0) {
rateLimiter = null;
} else {
- rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
+ rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
}
@@ -172,12 +172,12 @@ public class RecoverySettings extends AbstractComponent {
private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) {
this.maxBytesPerSec = maxBytesPerSec;
- if (maxBytesPerSec.bytes() <= 0) {
+ if (maxBytesPerSec.getBytes() <= 0) {
rateLimiter = null;
} else if (rateLimiter != null) {
- rateLimiter.setMBPerSec(maxBytesPerSec.mbFrac());
+ rateLimiter.setMBPerSec(maxBytesPerSec.getMbFrac());
} else {
- rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
+ rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
}
}
}
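
The settings hunks above change the fallback for the long action timeout from a string round-trip to a value computed directly from another setting (double the internal action timeout, or that setting's own getter via a method reference). A minimal sketch of the derived-default idea, with Duration and Map standing in for TimeValue and Settings (keys and default values are illustrative):

    import java.time.Duration;
    import java.util.Map;
    import java.util.function.Function;

    public class DerivedDefaultSketch {
        static final Function<Map<String, Duration>, Duration> INTERNAL_ACTION_TIMEOUT =
            settings -> settings.getOrDefault("indices.recovery.internal_action_timeout",
                                              Duration.ofMinutes(15));

        // The long timeout defaults to twice the resolved internal action
        // timeout, mirroring the "* 2" fallback in the hunk above.
        static final Function<Map<String, Duration>, Duration> INTERNAL_LONG_ACTION_TIMEOUT =
            settings -> INTERNAL_ACTION_TIMEOUT.apply(settings).multipliedBy(2);

        public static void main(String[] args) {
            System.out.println(INTERNAL_LONG_ACTION_TIMEOUT.apply(Map.of())); // PT30M
        }
    }
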
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index b226af7858..790376ba78 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -19,6 +19,8 @@
package org.elasticsearch.indices.recovery;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFormatTooNewException;
@@ -35,7 +37,6 @@ import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.CancellableThreads;
@@ -71,7 +72,7 @@ import java.util.stream.StreamSupport;
*/
public class RecoverySourceHandler {
- protected final ESLogger logger;
+ protected final Logger logger;
// Shard that is going to be recovered (the "source")
private final IndexShard shard;
private final String indexName;
@@ -106,7 +107,7 @@ public class RecoverySourceHandler {
final Supplier<Long> currentClusterStateVersionSupplier,
Function<String, Releasable> delayNewRecoveries,
final int fileChunkSizeInBytes,
- final ESLogger logger) {
+ final Logger logger) {
this.shard = shard;
this.recoveryTarget = recoveryTarget;
this.request = request;
@@ -313,8 +314,12 @@ public class RecoverySourceHandler {
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " +
"checksums are ok", null);
exception.addSuppressed(targetException);
- logger.warn("{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
- corruptIndexException, shard.shardId(), request.targetNode());
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
+ shard.shardId(),
+ request.targetNode()),
+ corruptIndexException);
throw exception;
} else {
throw targetException;
@@ -341,7 +346,8 @@ public class RecoverySourceHandler {
// Send a request preparing the new shard's translog to receive
// operations. This ensures the shard engine is started and disables
// garbage collection (not the JVM's GC!) of tombstone deletes
- cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps));
+ cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps,
+ shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp()));
stopWatch.stop();
response.startTime = stopWatch.totalTime().millis() - startEngineStart;
@@ -386,7 +392,7 @@ public class RecoverySourceHandler {
logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode());
cancellableThreads.execute(recoveryTarget::finalizeRecovery);
- if (isPrimaryRelocation()) {
+ if (request.isPrimaryRelocation()) {
// in case of primary relocation we have to ensure that the cluster state on the primary relocation target has all
// replica shards that have recovered or are still recovering from the current primary, otherwise replication actions
// will not be sent to these replicas. To accomplish this, first block new recoveries, then take the version of the latest cluster
@@ -410,10 +416,6 @@ public class RecoverySourceHandler {
indexName, shardId, request.targetNode(), stopWatch.totalTime());
}
- protected boolean isPrimaryRelocation() {
- return request.recoveryType() == RecoveryState.Type.PRIMARY_RELOCATION;
- }
-
/**
* Send the given snapshot's operations to this handler's target node.
* <p>
@@ -560,8 +562,13 @@ public class RecoverySourceHandler {
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " +
"checksums are ok", null);
exception.addSuppressed(e);
- logger.warn("{} Remote file corruption on node {}, recovering {}. local checksum OK",
- corruptIndexException, shardId, request.targetNode(), md);
+ logger.warn(
+ (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "{} Remote file corruption on node {}, recovering {}. local checksum OK",
+ shardId,
+ request.targetNode(),
+ md),
+ corruptIndexException);
throw exception;
}
} else {
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java
index a742c582a9..919601de94 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java
@@ -20,7 +20,8 @@
package org.elasticsearch.indices.recovery;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -96,40 +97,6 @@ public class RecoveryState implements ToXContent, Streamable {
}
}
- public enum Type {
- STORE((byte) 0),
- SNAPSHOT((byte) 1),
- REPLICA((byte) 2),
- PRIMARY_RELOCATION((byte) 3),
- LOCAL_SHARDS((byte) 4);
-
- private static final Type[] TYPES = new Type[Type.values().length];
-
- static {
- for (Type type : Type.values()) {
- assert type.id() < TYPES.length && type.id() >= 0;
- TYPES[type.id] = type;
- }
- }
-
- private final byte id;
-
- Type(byte id) {
- this.id = id;
- }
-
- public byte id() {
- return id;
- }
-
- public static Type fromId(byte id) {
- if (id < 0 || id >= TYPES.length) {
- throw new IllegalArgumentException("No mapping for id [" + id + "]");
- }
- return TYPES[id];
- }
- }
-
private Stage stage;
private final Index index = new Index();
@@ -137,9 +104,9 @@ public class RecoveryState implements ToXContent, Streamable {
private final VerifyIndex verifyIndex = new VerifyIndex();
private final Timer timer = new Timer();
- private Type type;
+ private RecoverySource recoverySource;
private ShardId shardId;
- private RestoreSource restoreSource;
+ @Nullable
private DiscoveryNode sourceNode;
private DiscoveryNode targetNode;
private boolean primary = false;
@@ -147,20 +114,15 @@ public class RecoveryState implements ToXContent, Streamable {
private RecoveryState() {
}
- public RecoveryState(ShardId shardId, boolean primary, Type type, DiscoveryNode sourceNode, DiscoveryNode targetNode) {
- this(shardId, primary, type, sourceNode, null, targetNode);
- }
-
- public RecoveryState(ShardId shardId, boolean primary, Type type, RestoreSource restoreSource, DiscoveryNode targetNode) {
- this(shardId, primary, type, null, restoreSource, targetNode);
- }
-
- private RecoveryState(ShardId shardId, boolean primary, Type type, @Nullable DiscoveryNode sourceNode, @Nullable RestoreSource restoreSource, DiscoveryNode targetNode) {
- this.shardId = shardId;
- this.primary = primary;
- this.type = type;
+ public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode) {
+ assert shardRouting.initializing() : "only allow initializing shard routing to be recovered: " + shardRouting;
+ RecoverySource recoverySource = shardRouting.recoverySource();
+ assert (recoverySource.getType() == RecoverySource.Type.PEER) == (sourceNode != null) :
+ "peer recovery requires source node, recovery type: " + recoverySource.getType() + " source node: " + sourceNode;
+ this.shardId = shardRouting.shardId();
+ this.primary = shardRouting.primary();
+ this.recoverySource = recoverySource;
this.sourceNode = sourceNode;
- this.restoreSource = restoreSource;
this.targetNode = targetNode;
stage = Stage.INIT;
timer.start();
@@ -237,10 +199,14 @@ public class RecoveryState implements ToXContent, Streamable {
return timer;
}
- public Type getType() {
- return type;
+ public RecoverySource getRecoverySource() {
+ return recoverySource;
}
+ /**
+     * Returns the recovery source node (non-null only for peer recovery)
+ */
+ @Nullable
public DiscoveryNode getSourceNode() {
return sourceNode;
}
@@ -249,10 +215,6 @@ public class RecoveryState implements ToXContent, Streamable {
return targetNode;
}
- public RestoreSource getRestoreSource() {
- return restoreSource;
- }
-
public boolean getPrimary() {
return primary;
}
@@ -266,14 +228,11 @@ public class RecoveryState implements ToXContent, Streamable {
@Override
public synchronized void readFrom(StreamInput in) throws IOException {
timer.readFrom(in);
- type = Type.fromId(in.readByte());
stage = Stage.fromId(in.readByte());
shardId = ShardId.readShardId(in);
- restoreSource = RestoreSource.readOptionalRestoreSource(in);
+ recoverySource = RecoverySource.readFrom(in);
targetNode = new DiscoveryNode(in);
- if (in.readBoolean()) {
- sourceNode = new DiscoveryNode(in);
- }
+ sourceNode = in.readOptionalWriteable(DiscoveryNode::new);
index.readFrom(in);
translog.readFrom(in);
verifyIndex.readFrom(in);
@@ -283,15 +242,11 @@ public class RecoveryState implements ToXContent, Streamable {
@Override
public void writeTo(StreamOutput out) throws IOException {
timer.writeTo(out);
- out.writeByte(type.id());
out.writeByte(stage.id());
shardId.writeTo(out);
- out.writeOptionalStreamable(restoreSource);
+ recoverySource.writeTo(out);
targetNode.writeTo(out);
- out.writeBoolean(sourceNode != null);
- if (sourceNode != null) {
- sourceNode.writeTo(out);
- }
+ out.writeOptionalWriteable(sourceNode);
index.writeTo(out);
translog.writeTo(out);
verifyIndex.writeTo(out);
@@ -302,19 +257,16 @@ public class RecoveryState implements ToXContent, Streamable {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.ID, shardId.id());
- builder.field(Fields.TYPE, type.toString());
+ builder.field(Fields.TYPE, recoverySource.getType());
builder.field(Fields.STAGE, stage.toString());
builder.field(Fields.PRIMARY, primary);
- builder.dateValueField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime);
+ builder.dateField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime);
if (timer.stopTime > 0) {
- builder.dateValueField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime);
+ builder.dateField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime);
}
builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, timer.time());
- if (restoreSource != null) {
- builder.field(Fields.SOURCE);
- restoreSource.toXContent(builder, params);
- } else {
+ if (recoverySource.getType() == RecoverySource.Type.PEER) {
builder.startObject(Fields.SOURCE);
builder.field(Fields.ID, sourceNode.getId());
builder.field(Fields.HOST, sourceNode.getHostName());
@@ -322,6 +274,10 @@ public class RecoveryState implements ToXContent, Streamable {
builder.field(Fields.IP, sourceNode.getHostAddress());
builder.field(Fields.NAME, sourceNode.getName());
builder.endObject();
+ } else {
+ builder.startObject(Fields.SOURCE);
+ recoverySource.addAdditionalFields(builder, params);
+ builder.endObject();
}
builder.startObject(Fields.TARGET);
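
Among the RecoveryState changes above, the manual write-a-boolean-then-maybe-the-node code is replaced by writeOptionalWriteable/readOptionalWriteable. The underlying encoding is a presence flag followed by the value when present; a minimal sketch with plain java.io streams standing in for StreamInput/StreamOutput:

    import java.io.*;

    public class OptionalWriteableSketch {
        // Write a presence flag, then the value only if present.
        static void writeOptionalString(DataOutput out, String value) throws IOException {
            out.writeBoolean(value != null);
            if (value != null) {
                out.writeUTF(value);
            }
        }

        static String readOptionalString(DataInput in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            writeOptionalString(out, null);     // e.g. no source node for a non-peer recovery
            writeOptionalString(out, "node-1"); // e.g. a peer-recovery source node

            DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readOptionalString(in)); // null
            System.out.println(readOptionalString(in)); // node-1
        }
    }
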
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
index 4a503b7dad..d608dc50e2 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -19,6 +19,9 @@
package org.elasticsearch.indices.recovery;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
@@ -31,7 +34,6 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.Callback;
@@ -62,7 +64,7 @@ import java.util.concurrent.atomic.AtomicLong;
*/
public class RecoveryTarget extends AbstractRefCounted implements RecoveryTargetHandler {
- private final ESLogger logger;
+ private final Logger logger;
private static final AtomicLong idGenerator = new AtomicLong();
@@ -74,7 +76,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
private final DiscoveryNode sourceNode;
private final String tempFilePrefix;
private final Store store;
- private final RecoveryTargetService.RecoveryListener listener;
+ private final PeerRecoveryTargetService.RecoveryListener listener;
private final Callback<Long> ensureClusterStateVersionCallback;
private final AtomicBoolean finished = new AtomicBoolean();
@@ -92,7 +94,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
copyFrom.ensureClusterStateVersionCallback);
}
- public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTargetService.RecoveryListener listener,
+ public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener,
Callback<Long> ensureClusterStateVersionCallback) {
this(indexShard, sourceNode, listener, new CancellableThreads(), idGenerator.incrementAndGet(), ensureClusterStateVersionCallback);
}
@@ -106,7 +108,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
     * version. Necessary for primary relocation so that the new primary knows about all other ongoing
* replica recoveries when replicating documents (see {@link RecoverySourceHandler}).
*/
- private RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTargetService.RecoveryListener listener,
+ private RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener,
CancellableThreads cancellableThreads, long recoveryId, Callback<Long> ensureClusterStateVersionCallback) {
super("recovery_status");
this.cancellableThreads = cancellableThreads;
@@ -293,7 +295,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
try {
entry.getValue().close();
} catch (Exception e) {
- logger.debug("error while closing recovery output [{}]", e, entry.getValue());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
}
iterator.remove();
}
@@ -324,9 +327,9 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
/** Implementation of {@link RecoveryTargetHandler} */
@Override
- public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
+ public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
state().getTranslog().totalOperations(totalTranslogOps);
- indexShard().skipTranslogRecovery();
+ indexShard().skipTranslogRecovery(maxUnsafeAutoIdTimestamp);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java
index 3d7e4f29c3..1896602879 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java
@@ -33,8 +33,10 @@ public interface RecoveryTargetHandler {
     * Prepares the target to receive translog operations, after all files have been copied
*
* @param totalTranslogOps total translog operations expected to be sent
+ * @param maxUnsafeAutoIdTimestamp the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine.
+ * This is used to ensure we don't add duplicate documents when we assume an append-only case based on auto-generated IDs
*/
- void prepareForTranslogOperations(int totalTranslogOps) throws IOException;
+ void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException;
/**
* The finalize request clears unreferenced translog files, refreshes the engine now that
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java
index 32cbce5519..327eb3b8ec 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java
@@ -74,16 +74,16 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
}
@Override
- public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
- transportService.submitRequest(targetNode, RecoveryTargetService.Actions.PREPARE_TRANSLOG,
- new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps),
+ public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
+ transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG,
+ new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, maxUnsafeAutoIdTimestamp),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
@Override
public void finalizeRecovery() {
- transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FINALIZE,
+ transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FINALIZE,
new RecoveryFinalizeRecoveryRequest(recoveryId, shardId),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
@@ -91,7 +91,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
@Override
public void ensureClusterStateVersion(long clusterStateVersion) {
- transportService.submitRequest(targetNode, RecoveryTargetService.Actions.WAIT_CLUSTERSTATE,
+ transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.WAIT_CLUSTERSTATE,
new RecoveryWaitForClusterStateRequest(recoveryId, shardId, clusterStateVersion),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
@@ -101,7 +101,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
recoveryId, shardId, operations, totalTranslogOps);
- transportService.submitRequest(targetNode, RecoveryTargetService.Actions.TRANSLOG_OPS, translogOperationsRequest,
+ transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.TRANSLOG_OPS, translogOperationsRequest,
translogOpsRequestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
@@ -111,7 +111,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(recoveryId, shardId,
phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, totalTranslogOps);
- transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FILES_INFO, recoveryInfoFilesRequest,
+ transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FILES_INFO, recoveryInfoFilesRequest,
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
@@ -119,7 +119,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
@Override
public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
- transportService.submitRequest(targetNode, RecoveryTargetService.Actions.CLEAN_FILES,
+ transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.CLEAN_FILES,
new RecoveryCleanFilesRequest(recoveryId, shardId, sourceMetaData, totalTranslogOps),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
@@ -150,7 +150,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
throttleTimeInNanos = 0;
}
- transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FILE_CHUNK,
+ transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FILE_CHUNK,
new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk,
totalTranslogOps,
/* we send totalOperations with every request since we collect stats on the target and that way we can
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java
index 67492affaf..591176f047 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java
@@ -19,8 +19,8 @@
package org.elasticsearch.indices.recovery;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.translog.Translog;
@@ -39,7 +39,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
public SharedFSRecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, StartRecoveryRequest request,
Supplier<Long> currentClusterStateVersionSupplier,
- Function<String, Releasable> delayNewRecoveries, ESLogger logger) {
+ Function<String, Releasable> delayNewRecoveries, Logger logger) {
super(shard, recoveryTarget, request, currentClusterStateVersionSupplier, delayNewRecoveries, -1, logger);
this.shard = shard;
this.request = request;
@@ -50,7 +50,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
boolean engineClosed = false;
try {
logger.trace("{} recovery [phase1] to {}: skipping phase 1 for shared filesystem", request.shardId(), request.targetNode());
- if (isPrimaryRelocation()) {
+ if (request.isPrimaryRelocation()) {
logger.debug("[phase1] closing engine on primary for shared filesystem recovery");
try {
// if we relocate we need to close the engine in order to open a new
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java
index ba2e1f42a5..9aa56fd8cb 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java
@@ -20,6 +20,7 @@
package org.elasticsearch.indices.recovery;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
@@ -43,7 +44,7 @@ public class StartRecoveryRequest extends TransportRequest {
private Store.MetadataSnapshot metadataSnapshot;
- private RecoveryState.Type recoveryType;
+ private boolean primaryRelocation;
public StartRecoveryRequest() {
}
@@ -54,13 +55,13 @@ public class StartRecoveryRequest extends TransportRequest {
* @param sourceNode The node to recover from
* @param targetNode The node to recover to
*/
- public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
+ public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Store.MetadataSnapshot metadataSnapshot, boolean primaryRelocation, long recoveryId) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.sourceNode = sourceNode;
this.targetNode = targetNode;
- this.recoveryType = recoveryType;
this.metadataSnapshot = metadataSnapshot;
+ this.primaryRelocation = primaryRelocation;
}
public long recoveryId() {
@@ -79,8 +80,8 @@ public class StartRecoveryRequest extends TransportRequest {
return targetNode;
}
- public RecoveryState.Type recoveryType() {
- return recoveryType;
+ public boolean isPrimaryRelocation() {
+ return primaryRelocation;
}
public Store.MetadataSnapshot metadataSnapshot() {
@@ -95,8 +96,7 @@ public class StartRecoveryRequest extends TransportRequest {
sourceNode = new DiscoveryNode(in);
targetNode = new DiscoveryNode(in);
metadataSnapshot = new Store.MetadataSnapshot(in);
- recoveryType = RecoveryState.Type.fromId(in.readByte());
-
+ primaryRelocation = in.readBoolean();
}
@Override
@@ -107,7 +107,7 @@ public class StartRecoveryRequest extends TransportRequest {
sourceNode.writeTo(out);
targetNode.writeTo(out);
metadataSnapshot.writeTo(out);
- out.writeByte(recoveryType.id());
+ out.writeBoolean(primaryRelocation);
}
}
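
(Note that StartRecoveryRequest makes the same wire-format style change as the earlier request class: the enum byte is replaced by a single boolean read/written in matching positions, so the readFrom/writeTo symmetry sketched above applies here unchanged.)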
diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
index bc7e7f59fc..439806b454 100644
--- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
+++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
@@ -19,6 +19,8 @@
package org.elasticsearch.indices.store;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@@ -228,7 +230,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
@Override
public void handleException(TransportException exp) {
- logger.debug("shards active request failed for {}", exp, shardId);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp);
if (awaitingResponses.decrementAndGet() == 0) {
allNodesResponded();
}
@@ -266,14 +268,14 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
try {
indicesService.deleteShardStore("no longer used", shardId, currentState);
} catch (Exception ex) {
- logger.debug("{} failed to delete unallocated shard, ignoring", ex, shardId);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex);
}
return currentState;
}
@Override
public void onFailure(String source, Exception e) {
- logger.error("{} unexpected error during deletion of unallocated shard", e, shardId);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e);
}
});
}
@@ -323,9 +325,9 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
try {
channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode()));
} catch (IOException e) {
- logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
} catch (EsRejectedExecutionException e) {
- logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
}
}
}, new ClusterStateObserver.ValidationPredicate() {
diff --git a/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java
index 3f725c43b2..88105420e1 100644
--- a/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java
+++ b/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java
@@ -107,15 +107,14 @@ public final class ConfigurationUtils {
value.getClass().getName() + "]");
}
-
/**
* Returns and removes the specified property from the specified configuration map.
*
     * If the property value isn't of type int, an {@link ElasticsearchParseException} is thrown.
     * If the property is missing, an {@link ElasticsearchParseException} is thrown.
*/
- public static int readIntProperty(String processorType, String processorTag, Map<String, Object> configuration,
- String propertyName, int defaultValue) {
+ public static Integer readIntProperty(String processorType, String processorTag, Map<String, Object> configuration,
+ String propertyName, Integer defaultValue) {
Object value = configuration.remove(propertyName);
if (value == null) {
return defaultValue;
@@ -225,7 +224,13 @@ public final class ConfigurationUtils {
public static ElasticsearchException newConfigurationException(String processorType, String processorTag,
String propertyName, String reason) {
- ElasticsearchParseException exception = new ElasticsearchParseException("[" + propertyName + "] " + reason);
+ String msg;
+ if (propertyName == null) {
+ msg = reason;
+ } else {
+ msg = "[" + propertyName + "] " + reason;
+ }
+ ElasticsearchParseException exception = new ElasticsearchParseException(msg);
addHeadersToException(exception, processorType, processorTag, propertyName);
return exception;
}
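
The readIntProperty change above widens the return and default types from int to Integer so that an absent property can yield null rather than a forced primitive default; the pipeline version field added later in this diff relies on exactly that. A minimal sketch of the behaviour, with a simplified parser in place of the real type checks (names illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class OptionalIntPropertySketch {
        static Integer readIntProperty(Map<String, Object> config, String name, Integer defaultValue) {
            Object value = config.remove(name); // consume the property, as the real helper does
            if (value == null) {
                return defaultValue;            // may legitimately be null now
            }
            return Integer.parseInt(value.toString());
        }

        public static void main(String[] args) {
            Map<String, Object> config = new HashMap<>();
            config.put("version", "2");
            System.out.println(readIntProperty(config, "version", null)); // 2
            System.out.println(readIntProperty(config, "version", null)); // null (already consumed)
        }
    }
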
diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java
index f61abdd48d..8010c6e6c3 100644
--- a/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java
+++ b/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java
@@ -166,6 +166,17 @@ public final class IngestDocument {
* @throws IllegalArgumentException if the path is null, empty or invalid.
*/
public boolean hasField(String path) {
+ return hasField(path, false);
+ }
+
+ /**
+ * Checks whether the document contains a value for the provided path
+ * @param path The path within the document in dot-notation
+     * @param failOutOfRange Whether to throw an IllegalArgumentException if an array is accessed outside of its range
+ * @return true if the document contains a value for the field, false otherwise
+ * @throws IllegalArgumentException if the path is null, empty or invalid.
+ */
+ public boolean hasField(String path, boolean failOutOfRange) {
FieldPath fieldPath = new FieldPath(path);
Object context = fieldPath.initialContext;
for (int i = 0; i < fieldPath.pathElements.length - 1; i++) {
@@ -183,7 +194,12 @@ public final class IngestDocument {
try {
int index = Integer.parseInt(pathElement);
if (index < 0 || index >= list.size()) {
- return false;
+ if (failOutOfRange) {
+ throw new IllegalArgumentException("[" + index + "] is out of bounds for array with length [" +
+ list.size() + "] as part of path [" + path + "]");
+ } else {
+ return false;
+ }
}
context = list.get(index);
} catch (NumberFormatException e) {
@@ -206,7 +222,16 @@ public final class IngestDocument {
List<Object> list = (List<Object>) context;
try {
int index = Integer.parseInt(leafKey);
- return index >= 0 && index < list.size();
+ if (index >= 0 && index < list.size()) {
+ return true;
+ } else {
+ if (failOutOfRange) {
+ throw new IllegalArgumentException("[" + index + "] is out of bounds for array with length [" +
+ list.size() + "] as part of path [" + path + "]");
+ } else {
+ return false;
+ }
+ }
} catch (NumberFormatException e) {
return false;
}
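
The new hasField overload above keeps the lenient behaviour by default but can turn an out-of-range array index into an error. A minimal sketch of the flag's effect on a single bounds check (a simplified stand-in, not the real dot-notation path resolver):

    import java.util.List;

    public class HasFieldSketch {
        static boolean hasIndex(List<?> list, int index, boolean failOutOfRange) {
            if (index >= 0 && index < list.size()) {
                return true;
            }
            if (failOutOfRange) {
                throw new IllegalArgumentException("[" + index + "] is out of bounds for array with length ["
                    + list.size() + "]");
            }
            return false; // lenient default: out of range simply means "no value here"
        }

        public static void main(String[] args) {
            List<String> values = List.of("a", "b");
            System.out.println(hasIndex(values, 1, false)); // true
            System.out.println(hasIndex(values, 5, false)); // false
            try {
                hasIndex(values, 5, true);
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // [5] is out of bounds for array with length [2]
            }
        }
    }
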
diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestService.java b/core/src/main/java/org/elasticsearch/ingest/IngestService.java
index 07e2aa1fe5..5249ed7a7d 100644
--- a/core/src/main/java/org/elasticsearch/ingest/IngestService.java
+++ b/core/src/main/java/org/elasticsearch/ingest/IngestService.java
@@ -27,6 +27,7 @@ import java.util.Map;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.plugins.IngestPlugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
@@ -40,10 +41,12 @@ public class IngestService {
private final PipelineExecutionService pipelineExecutionService;
public IngestService(Settings settings, ThreadPool threadPool,
- Environment env, ScriptService scriptService, List<IngestPlugin> ingestPlugins) {
+ Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry,
+ List<IngestPlugin> ingestPlugins) {
+
final TemplateService templateService = new InternalTemplateService(scriptService);
Processor.Parameters parameters = new Processor.Parameters(env, scriptService, templateService,
- threadPool.getThreadContext());
+ analysisRegistry, threadPool.getThreadContext());
Map<String, Processor.Factory> processorFactories = new HashMap<>();
for (IngestPlugin ingestPlugin : ingestPlugins) {
Map<String, Processor.Factory> newProcessors = ingestPlugin.getProcessors(parameters);
diff --git a/core/src/main/java/org/elasticsearch/ingest/Pipeline.java b/core/src/main/java/org/elasticsearch/ingest/Pipeline.java
index e29d206543..4a705c43ba 100644
--- a/core/src/main/java/org/elasticsearch/ingest/Pipeline.java
+++ b/core/src/main/java/org/elasticsearch/ingest/Pipeline.java
@@ -20,6 +20,7 @@
package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Nullable;
import java.util.Arrays;
import java.util.Collections;
@@ -33,16 +34,21 @@ public final class Pipeline {
static final String DESCRIPTION_KEY = "description";
static final String PROCESSORS_KEY = "processors";
+ static final String VERSION_KEY = "version";
static final String ON_FAILURE_KEY = "on_failure";
private final String id;
+ @Nullable
private final String description;
+ @Nullable
+ private final Integer version;
private final CompoundProcessor compoundProcessor;
- public Pipeline(String id, String description, CompoundProcessor compoundProcessor) {
+ public Pipeline(String id, @Nullable String description, @Nullable Integer version, CompoundProcessor compoundProcessor) {
this.id = id;
this.description = description;
this.compoundProcessor = compoundProcessor;
+ this.version = version;
}
/**
@@ -62,11 +68,22 @@ public final class Pipeline {
/**
     * An optional description of what this pipeline does to the data it processes.
*/
+ @Nullable
public String getDescription() {
return description;
}
/**
+ * An optional version stored with the pipeline so that it can be used to determine if the pipeline should be updated / replaced.
+ *
+ * @return {@code null} if not supplied.
+ */
+ @Nullable
+ public Integer getVersion() {
+ return version;
+ }
+
+ /**
* Get the underlying {@link CompoundProcessor} containing the Pipeline's processors
*/
public CompoundProcessor getCompoundProcessor() {
@@ -100,6 +117,7 @@ public final class Pipeline {
public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorFactories) throws Exception {
String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
+ Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null);
List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
List<Processor> processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorFactories);
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs =
@@ -114,7 +132,7 @@ public final class Pipeline {
}
CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.unmodifiableList(processors),
Collections.unmodifiableList(onFailureProcessors));
- return new Pipeline(id, description, compoundProcessor);
+ return new Pipeline(id, description, version, compoundProcessor);
}
}
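
The version field above is stored with the pipeline but not interpreted by Pipeline itself. As one hypothetical use (an assumption for illustration, not shown in this diff), a caller could gate updates on it:

    public class PipelineVersionSketch {
        static class Pipeline {
            final String id;
            final Integer version; // null when the author supplied no version

            Pipeline(String id, Integer version) {
                this.id = id;
                this.version = version;
            }
        }

        // One plausible policy (an assumption, not taken from the diff): replace
        // when either side carries no version, or when the incoming one is newer.
        static boolean shouldReplace(Pipeline existing, Pipeline incoming) {
            if (existing.version == null || incoming.version == null) {
                return true;
            }
            return incoming.version > existing.version;
        }

        public static void main(String[] args) {
            Pipeline stored = new Pipeline("logs", 2);
            System.out.println(shouldReplace(stored, new Pipeline("logs", 3))); // true
            System.out.println(shouldReplace(stored, new Pipeline("logs", 2))); // false
        }
    }
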
diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java
index 713001c233..94850674e7 100644
--- a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java
+++ b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java
@@ -24,6 +24,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
@@ -62,12 +63,13 @@ public class PipelineStore extends AbstractComponent implements ClusterStateList
@Override
public void clusterChanged(ClusterChangedEvent event) {
- innerUpdatePipelines(event.state());
+ innerUpdatePipelines(event.previousState(), event.state());
}
- void innerUpdatePipelines(ClusterState state) {
+ void innerUpdatePipelines(ClusterState previousState, ClusterState state) {
IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE);
- if (ingestMetadata == null) {
+ IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE);
+ if (Objects.equals(ingestMetadata, previousIngestMetadata)) {
return;
}
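
The innerUpdatePipelines change above adds a cheap guard: compare the previous and current cluster state's ingest metadata and return early when nothing changed, instead of re-parsing pipelines on every cluster state event. A minimal sketch of the guard, with strings standing in for IngestMetadata:

    import java.util.Objects;

    public class ChangeDetectionSketch {
        private String currentMetadata;

        void clusterChanged(String previousMetadata, String newMetadata) {
            // Objects.equals is null-safe, so "both absent" also counts as unchanged.
            if (Objects.equals(previousMetadata, newMetadata)) {
                return; // nothing to do; avoids rebuilding pipelines on every event
            }
            currentMetadata = newMetadata;
            System.out.println("rebuilt pipelines from: " + currentMetadata);
        }

        public static void main(String[] args) {
            ChangeDetectionSketch store = new ChangeDetectionSketch();
            store.clusterChanged(null, "v1");  // rebuilds
            store.clusterChanged("v1", "v1");  // no-op
            store.clusterChanged("v1", "v2");  // rebuilds
        }
    }
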
diff --git a/core/src/main/java/org/elasticsearch/ingest/Processor.java b/core/src/main/java/org/elasticsearch/ingest/Processor.java
index ef1cd882d2..af4ea954dd 100644
--- a/core/src/main/java/org/elasticsearch/ingest/Processor.java
+++ b/core/src/main/java/org/elasticsearch/ingest/Processor.java
@@ -21,6 +21,7 @@ package org.elasticsearch.ingest;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.script.ScriptService;
import java.util.Map;
@@ -87,17 +88,23 @@ public interface Processor {
public final TemplateService templateService;
/**
+ * Provides analyzer support
+ */
+ public final AnalysisRegistry analysisRegistry;
+
+ /**
* Allows processors to read headers set by {@link org.elasticsearch.action.support.ActionFilter}
     * instances that have run prior to ingest.
*/
public final ThreadContext threadContext;
public Parameters(Environment env, ScriptService scriptService, TemplateService templateService,
- ThreadContext threadContext) {
+ AnalysisRegistry analysisRegistry, ThreadContext threadContext) {
this.env = env;
this.scriptService = scriptService;
this.templateService = templateService;
this.threadContext = threadContext;
+ this.analysisRegistry = analysisRegistry;
}
}
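
The Parameters change above is a parameter-object extension: threading one new field (the AnalysisRegistry) through the shared Parameters class gives every processor factory access to it without touching each factory's signature. A minimal sketch with illustrative stand-in types:

    public class ParameterObjectSketch {
        static class AnalysisRegistry { }
        static class TemplateService { }

        static class Parameters {
            final TemplateService templateService;
            final AnalysisRegistry analysisRegistry; // the newly threaded-through dependency

            Parameters(TemplateService templateService, AnalysisRegistry analysisRegistry) {
                this.templateService = templateService;
                this.analysisRegistry = analysisRegistry;
            }
        }

        interface ProcessorFactory {
            String create(Parameters parameters); // factories keep a single-argument signature
        }

        public static void main(String[] args) {
            Parameters parameters = new Parameters(new TemplateService(), new AnalysisRegistry());
            ProcessorFactory factory = p -> "uses " + p.analysisRegistry.getClass().getSimpleName();
            System.out.println(factory.create(parameters));
        }
    }
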
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
index 641dc3a5bb..bf7dce9c0d 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
@@ -386,6 +386,30 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
out.writeLong(totalWriteKilobytes);
}
+ public DeviceStats[] getDevicesStats() {
+ return devicesStats;
+ }
+
+ public long getTotalOperations() {
+ return totalOperations;
+ }
+
+ public long getTotalReadOperations() {
+ return totalReadOperations;
+ }
+
+ public long getTotalWriteOperations() {
+ return totalWriteOperations;
+ }
+
+ public long getTotalReadKilobytes() {
+ return totalReadKilobytes;
+ }
+
+ public long getTotalWriteKilobytes() {
+ return totalWriteKilobytes;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (devicesStats.length > 0) {
@@ -410,16 +434,16 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
}
- final long timestamp;
- final Path[] paths;
- final IoStats ioStats;
- Path total;
+ private final long timestamp;
+ private final Path[] paths;
+ private final IoStats ioStats;
+ private final Path total;
public FsInfo(long timestamp, IoStats ioStats, Path[] paths) {
this.timestamp = timestamp;
this.ioStats = ioStats;
this.paths = paths;
- this.total = null;
+ this.total = total();
}
/**
@@ -432,6 +456,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
for (int i = 0; i < paths.length; i++) {
paths[i] = new Path(in);
}
+ this.total = total();
}
@Override
@@ -445,13 +470,10 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
}
public Path getTotal() {
- return total();
+ return total;
}
- public Path total() {
- if (total != null) {
- return total;
- }
+ private Path total() {
Path res = new Path();
Set<String> seenDevices = new HashSet<>(paths.length);
for (Path subPath : paths) {
@@ -462,7 +484,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
}
res.add(subPath);
}
- total = res;
return res;
}
@@ -506,5 +527,4 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
static final String TOTAL = "total";
static final String IO_STATS = "io_stats";
}
-
}
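
The FsInfo change above replaces a lazily cached, mutable total with a final field computed once in the constructor, removing the lazy-initialization path from getTotal(). A minimal sketch of the idea (longs stand in for the per-path stats):

    import java.util.List;

    public class EagerTotalSketch {
        private final List<Long> paths;
        private final long total; // final: no lazy-init race, always consistent

        EagerTotalSketch(List<Long> paths) {
            this.paths = paths;
            this.total = computeTotal(); // eager, mirroring the diff
        }

        private long computeTotal() {
            long sum = 0;
            for (long p : paths) {
                sum += p;
            }
            return sum;
        }

        long getTotal() {
            return total; // plain field read; the old code recomputed or cached here
        }

        public static void main(String[] args) {
            System.out.println(new EagerTotalSketch(List.of(10L, 20L)).getTotal()); // 30
        }
    }
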
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
index f0b6155692..4cdbed367c 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
@@ -19,6 +19,8 @@
package org.elasticsearch.monitor.fs;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.collect.Tuple;
@@ -112,7 +114,9 @@ public class FsProbe extends AbstractComponent {
} catch (Exception e) {
// do not fail Elasticsearch if something unexpected
// happens here
- logger.debug("unexpected exception processing /proc/diskstats for devices {}", e, devicesNumbers);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e);
return null;
}
}
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java
index 9f38538f26..96467b4d40 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java
@@ -19,8 +19,8 @@
package org.elasticsearch.monitor.fs;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -55,7 +55,7 @@ public class FsService extends AbstractComponent {
return cache.getOrRefresh();
}
- private static FsInfo stats(FsProbe probe, FsInfo initialValue, ESLogger logger) {
+ private static FsInfo stats(FsProbe probe, FsInfo initialValue, Logger logger) {
try {
return probe.stats(initialValue);
} catch (IOException e) {
diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java
index a842ba2849..3a19fe5bd0 100644
--- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java
+++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java
@@ -19,14 +19,13 @@
package org.elasticsearch.monitor.jvm;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
@@ -36,7 +35,6 @@ import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
-import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
@@ -207,7 +205,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent {
"[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}";
static void logSlowGc(
- final ESLogger logger,
+ final Logger logger,
final JvmMonitor.Threshold threshold,
final long seq,
final JvmMonitor.SlowGcEvent slowGcEvent,
@@ -307,7 +305,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent {
private static final String OVERHEAD_LOG_MESSAGE = "[gc][{}] overhead, spent [{}] collecting in the last [{}]";
static void logGcOverhead(
- final ESLogger logger,
+ final Logger logger,
final JvmMonitor.Threshold threshold,
final long current,
final long elapsed,
diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
index 1619ecee23..e277faafd5 100644
--- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
@@ -19,10 +19,9 @@
package org.elasticsearch.monitor.jvm;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -37,14 +36,10 @@ import java.lang.management.PlatformManagedObject;
import java.lang.management.RuntimeMXBean;
import java.lang.reflect.Method;
import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
-/**
- *
- */
-public class JvmInfo implements Streamable, ToXContent {
+public class JvmInfo implements Writeable, ToXContent {
private static JvmInfo INSTANCE;
@@ -61,100 +56,114 @@ public class JvmInfo implements Streamable, ToXContent {
} catch (Exception e) {
pid = -1;
}
- JvmInfo info = new JvmInfo();
- info.pid = pid;
- info.startTime = runtimeMXBean.getStartTime();
- info.version = System.getProperty("java.version");
- info.vmName = runtimeMXBean.getVmName();
- info.vmVendor = runtimeMXBean.getVmVendor();
- info.vmVersion = runtimeMXBean.getVmVersion();
- info.mem = new Mem();
- info.mem.heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit();
- info.mem.heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax();
- info.mem.nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit();
- info.mem.nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getMax();
+
+ long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit();
+ long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax();
+ long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit();
+ long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getMax();
+ long directMemoryMax = 0;
try {
Class<?> vmClass = Class.forName("sun.misc.VM");
- info.mem.directMemoryMax = (Long) vmClass.getMethod("maxDirectMemory").invoke(null);
+ directMemoryMax = (Long) vmClass.getMethod("maxDirectMemory").invoke(null);
} catch (Exception t) {
// ignore
}
- info.inputArguments = runtimeMXBean.getInputArguments().toArray(new String[runtimeMXBean.getInputArguments().size()]);
+ String[] inputArguments = runtimeMXBean.getInputArguments().toArray(new String[runtimeMXBean.getInputArguments().size()]);
+ Mem mem = new Mem(heapInit, heapMax, nonHeapInit, nonHeapMax, directMemoryMax);
+
+ String bootClassPath;
try {
- info.bootClassPath = runtimeMXBean.getBootClassPath();
+ bootClassPath = runtimeMXBean.getBootClassPath();
} catch (UnsupportedOperationException e) {
// oracle java 9
- info.bootClassPath = System.getProperty("sun.boot.class.path");
- if (info.bootClassPath == null) {
+ bootClassPath = System.getProperty("sun.boot.class.path");
+ if (bootClassPath == null) {
// something else
- info.bootClassPath = "<unknown>";
+ bootClassPath = "<unknown>";
}
}
- info.classPath = runtimeMXBean.getClassPath();
- info.systemProperties = Collections.unmodifiableMap(runtimeMXBean.getSystemProperties());
+ String classPath = runtimeMXBean.getClassPath();
+ Map<String, String> systemProperties = Collections.unmodifiableMap(runtimeMXBean.getSystemProperties());
List<GarbageCollectorMXBean> gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans();
- info.gcCollectors = new String[gcMxBeans.size()];
+ String[] gcCollectors = new String[gcMxBeans.size()];
for (int i = 0; i < gcMxBeans.size(); i++) {
GarbageCollectorMXBean gcMxBean = gcMxBeans.get(i);
- info.gcCollectors[i] = gcMxBean.getName();
+ gcCollectors[i] = gcMxBean.getName();
}
List<MemoryPoolMXBean> memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans();
- info.memoryPools = new String[memoryPoolMXBeans.size()];
+ String[] memoryPools = new String[memoryPoolMXBeans.size()];
for (int i = 0; i < memoryPoolMXBeans.size(); i++) {
MemoryPoolMXBean memoryPoolMXBean = memoryPoolMXBeans.get(i);
- info.memoryPools[i] = memoryPoolMXBean.getName();
+ memoryPools[i] = memoryPoolMXBean.getName();
}
+ String onError = null;
+ String onOutOfMemoryError = null;
+ String useCompressedOops = "unknown";
+ String useG1GC = "unknown";
+ String useSerialGC = "unknown";
+ long configuredInitialHeapSize = -1;
+ long configuredMaxHeapSize = -1;
try {
@SuppressWarnings("unchecked") Class<? extends PlatformManagedObject> clazz =
- (Class<? extends PlatformManagedObject>)Class.forName("com.sun.management.HotSpotDiagnosticMXBean");
+ (Class<? extends PlatformManagedObject>)Class.forName("com.sun.management.HotSpotDiagnosticMXBean");
Class<?> vmOptionClazz = Class.forName("com.sun.management.VMOption");
PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz);
Method vmOptionMethod = clazz.getMethod("getVMOption", String.class);
Method valueMethod = vmOptionClazz.getMethod("getValue");
try {
- Object onError = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnError");
- info.onError = (String) valueMethod.invoke(onError);
+ Object onErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnError");
+ onError = (String) valueMethod.invoke(onErrorObject);
+ } catch (Exception ignored) {
+ }
+
+ try {
+ Object onOutOfMemoryErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnOutOfMemoryError");
+ onOutOfMemoryError = (String) valueMethod.invoke(onOutOfMemoryErrorObject);
} catch (Exception ignored) {
}
try {
- Object onOutOfMemoryError = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnOutOfMemoryError");
- info.onOutOfMemoryError = (String) valueMethod.invoke(onOutOfMemoryError);
+ Object useCompressedOopsVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops");
+ useCompressedOops = (String) valueMethod.invoke(useCompressedOopsVmOptionObject);
} catch (Exception ignored) {
}
try {
- Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops");
- info.useCompressedOops = (String) valueMethod.invoke(useCompressedOopsVmOption);
+ Object useG1GCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseG1GC");
+ useG1GC = (String) valueMethod.invoke(useG1GCVmOptionObject);
} catch (Exception ignored) {
}
try {
- Object useG1GCVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseG1GC");
- info.useG1GC = (String) valueMethod.invoke(useG1GCVmOption);
+ Object initialHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "InitialHeapSize");
+ configuredInitialHeapSize = Long.parseLong((String) valueMethod.invoke(initialHeapSizeVmOptionObject));
} catch (Exception ignored) {
}
try {
- Object initialHeapSizeVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "InitialHeapSize");
- info.configuredInitialHeapSize = Long.parseLong((String) valueMethod.invoke(initialHeapSizeVmOption));
+ Object maxHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "MaxHeapSize");
+ configuredMaxHeapSize = Long.parseLong((String) valueMethod.invoke(maxHeapSizeVmOptionObject));
} catch (Exception ignored) {
}
try {
- Object maxHeapSizeVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "MaxHeapSize");
- info.configuredMaxHeapSize = Long.parseLong((String) valueMethod.invoke(maxHeapSizeVmOption));
+ Object useSerialGCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseSerialGC");
+ useSerialGC = (String) valueMethod.invoke(useSerialGCVmOptionObject);
} catch (Exception ignored) {
}
+
} catch (Exception ignored) {
}
- INSTANCE = info;
+ INSTANCE = new JvmInfo(pid, System.getProperty("java.version"), runtimeMXBean.getVmName(), runtimeMXBean.getVmVersion(),
+ runtimeMXBean.getVmVendor(), runtimeMXBean.getStartTime(), configuredInitialHeapSize, configuredMaxHeapSize,
+ mem, inputArguments, bootClassPath, classPath, systemProperties, gcCollectors, memoryPools, onError, onOutOfMemoryError,
+ useCompressedOops, useG1GC, useSerialGC);
}
public static JvmInfo jvmInfo() {
@@ -166,40 +175,103 @@ public class JvmInfo implements Streamable, ToXContent {
return INSTANCE;
}
- long pid = -1;
-
- String version = "";
- String vmName = "";
- String vmVersion = "";
- String vmVendor = "";
-
- long startTime = -1;
-
- private long configuredInitialHeapSize;
- private long configuredMaxHeapSize;
-
- Mem mem;
-
- String[] inputArguments;
-
- String bootClassPath;
-
- String classPath;
-
- Map<String, String> systemProperties;
-
- String[] gcCollectors = Strings.EMPTY_ARRAY;
- String[] memoryPools = Strings.EMPTY_ARRAY;
-
- private String onError;
-
- private String onOutOfMemoryError;
-
- private String useCompressedOops = "unknown";
+ private final long pid;
+ private final String version;
+ private final String vmName;
+ private final String vmVersion;
+ private final String vmVendor;
+ private final long startTime;
+ private final long configuredInitialHeapSize;
+ private final long configuredMaxHeapSize;
+ private final Mem mem;
+ private final String[] inputArguments;
+ private final String bootClassPath;
+ private final String classPath;
+ private final Map<String, String> systemProperties;
+ private final String[] gcCollectors;
+ private final String[] memoryPools;
+ private final String onError;
+ private final String onOutOfMemoryError;
+ private final String useCompressedOops;
+ private final String useG1GC;
+ private final String useSerialGC;
+
+ private JvmInfo(long pid, String version, String vmName, String vmVersion, String vmVendor, long startTime,
+ long configuredInitialHeapSize, long configuredMaxHeapSize, Mem mem, String[] inputArguments, String bootClassPath,
+ String classPath, Map<String, String> systemProperties, String[] gcCollectors, String[] memoryPools, String onError,
+ String onOutOfMemoryError, String useCompressedOops, String useG1GC, String useSerialGC) {
+ this.pid = pid;
+ this.version = version;
+ this.vmName = vmName;
+ this.vmVersion = vmVersion;
+ this.vmVendor = vmVendor;
+ this.startTime = startTime;
+ this.configuredInitialHeapSize = configuredInitialHeapSize;
+ this.configuredMaxHeapSize = configuredMaxHeapSize;
+ this.mem = mem;
+ this.inputArguments = inputArguments;
+ this.bootClassPath = bootClassPath;
+ this.classPath = classPath;
+ this.systemProperties = systemProperties;
+ this.gcCollectors = gcCollectors;
+ this.memoryPools = memoryPools;
+ this.onError = onError;
+ this.onOutOfMemoryError = onOutOfMemoryError;
+ this.useCompressedOops = useCompressedOops;
+ this.useG1GC = useG1GC;
+ this.useSerialGC = useSerialGC;
+ }
- private String useG1GC = "unknown";
+ public JvmInfo(StreamInput in) throws IOException {
+ pid = in.readLong();
+ version = in.readString();
+ vmName = in.readString();
+ vmVersion = in.readString();
+ vmVendor = in.readString();
+ startTime = in.readLong();
+ inputArguments = new String[in.readInt()];
+ for (int i = 0; i < inputArguments.length; i++) {
+ inputArguments[i] = in.readString();
+ }
+ bootClassPath = in.readString();
+ classPath = in.readString();
+ systemProperties = in.readMap(StreamInput::readString, StreamInput::readString);
+ mem = new Mem(in);
+ gcCollectors = in.readStringArray();
+ memoryPools = in.readStringArray();
+ useCompressedOops = in.readString();
+ // the following members are only used locally for bootstrap checks and are never serialized nor printed out
+ this.configuredMaxHeapSize = -1;
+ this.configuredInitialHeapSize = -1;
+ this.onError = null;
+ this.onOutOfMemoryError = null;
+ this.useG1GC = "unknown";
+ this.useSerialGC = "unknown";
+ }
- private JvmInfo() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(pid);
+ out.writeString(version);
+ out.writeString(vmName);
+ out.writeString(vmVersion);
+ out.writeString(vmVendor);
+ out.writeLong(startTime);
+ out.writeInt(inputArguments.length);
+ for (String inputArgument : inputArguments) {
+ out.writeString(inputArgument);
+ }
+ out.writeString(bootClassPath);
+ out.writeString(classPath);
+ out.writeVInt(this.systemProperties.size());
+ for (Map.Entry<String, String> entry : systemProperties.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeString(entry.getValue());
+ }
+ mem.writeTo(out);
+ out.writeStringArray(gcCollectors);
+ out.writeStringArray(memoryPools);
+ out.writeString(useCompressedOops);
}
/**
@@ -354,6 +426,18 @@ public class JvmInfo implements Streamable, ToXContent {
return this.useG1GC;
}
+ public String useSerialGC() {
+ return this.useSerialGC;
+ }
+
+ public String[] getGcCollectors() {
+ return gcCollectors;
+ }
+
+ public String[] getMemoryPools() {
+ return memoryPools;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.JVM);
@@ -362,7 +446,7 @@ public class JvmInfo implements Streamable, ToXContent {
builder.field(Fields.VM_NAME, vmName);
builder.field(Fields.VM_VERSION, vmVersion);
builder.field(Fields.VM_VENDOR, vmVendor);
- builder.dateValueField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, startTime);
+ builder.dateField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, startTime);
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.HEAP_INIT_IN_BYTES, Fields.HEAP_INIT, mem.heapInit);
@@ -372,8 +456,8 @@ public class JvmInfo implements Streamable, ToXContent {
builder.byteSizeField(Fields.DIRECT_MAX_IN_BYTES, Fields.DIRECT_MAX, mem.directMemoryMax);
builder.endObject();
- builder.field(Fields.GC_COLLECTORS, gcCollectors);
- builder.field(Fields.MEMORY_POOLS, memoryPools);
+ builder.array(Fields.GC_COLLECTORS, gcCollectors);
+ builder.array(Fields.MEMORY_POOLS, memoryPools);
builder.field(Fields.USING_COMPRESSED_OOPS, useCompressedOops);
@@ -407,72 +491,37 @@ public class JvmInfo implements Streamable, ToXContent {
static final String USING_COMPRESSED_OOPS = "using_compressed_ordinary_object_pointers";
}
- public static JvmInfo readJvmInfo(StreamInput in) throws IOException {
- JvmInfo jvmInfo = new JvmInfo();
- jvmInfo.readFrom(in);
- return jvmInfo;
- }
+ public static class Mem implements Writeable {
- @Override
- public void readFrom(StreamInput in) throws IOException {
- pid = in.readLong();
- version = in.readString();
- vmName = in.readString();
- vmVersion = in.readString();
- vmVendor = in.readString();
- startTime = in.readLong();
- inputArguments = new String[in.readInt()];
- for (int i = 0; i < inputArguments.length; i++) {
- inputArguments[i] = in.readString();
- }
- bootClassPath = in.readString();
- classPath = in.readString();
- systemProperties = new HashMap<>();
- int size = in.readInt();
- for (int i = 0; i < size; i++) {
- systemProperties.put(in.readString(), in.readString());
- }
- mem = new Mem();
- mem.readFrom(in);
- gcCollectors = in.readStringArray();
- memoryPools = in.readStringArray();
- useCompressedOops = in.readString();
- }
+ private final long heapInit;
+ private final long heapMax;
+ private final long nonHeapInit;
+ private final long nonHeapMax;
+ private final long directMemoryMax;
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeLong(pid);
- out.writeString(version);
- out.writeString(vmName);
- out.writeString(vmVersion);
- out.writeString(vmVendor);
- out.writeLong(startTime);
- out.writeInt(inputArguments.length);
- for (String inputArgument : inputArguments) {
- out.writeString(inputArgument);
+ public Mem(long heapInit, long heapMax, long nonHeapInit, long nonHeapMax, long directMemoryMax) {
+ this.heapInit = heapInit;
+ this.heapMax = heapMax;
+ this.nonHeapInit = nonHeapInit;
+ this.nonHeapMax = nonHeapMax;
+ this.directMemoryMax = directMemoryMax;
}
- out.writeString(bootClassPath);
- out.writeString(classPath);
- out.writeInt(systemProperties.size());
- for (Map.Entry<String, String> entry : systemProperties.entrySet()) {
- out.writeString(entry.getKey());
- out.writeString(entry.getValue());
- }
- mem.writeTo(out);
- out.writeStringArray(gcCollectors);
- out.writeStringArray(memoryPools);
- out.writeString(useCompressedOops);
- }
- public static class Mem implements Streamable {
-
- long heapInit = 0;
- long heapMax = 0;
- long nonHeapInit = 0;
- long nonHeapMax = 0;
- long directMemoryMax = 0;
+ public Mem(StreamInput in) throws IOException {
+ this.heapInit = in.readVLong();
+ this.heapMax = in.readVLong();
+ this.nonHeapInit = in.readVLong();
+ this.nonHeapMax = in.readVLong();
+ this.directMemoryMax = in.readVLong();
+ }
- Mem() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(heapInit);
+ out.writeVLong(heapMax);
+ out.writeVLong(nonHeapInit);
+ out.writeVLong(nonHeapMax);
+ out.writeVLong(directMemoryMax);
}
public ByteSizeValue getHeapInit() {
@@ -494,23 +543,5 @@ public class JvmInfo implements Streamable, ToXContent {
public ByteSizeValue getDirectMemoryMax() {
return new ByteSizeValue(directMemoryMax);
}
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- heapInit = in.readVLong();
- heapMax = in.readVLong();
- nonHeapInit = in.readVLong();
- nonHeapMax = in.readVLong();
- directMemoryMax = in.readVLong();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVLong(heapInit);
- out.writeVLong(heapMax);
- out.writeVLong(nonHeapInit);
- out.writeVLong(nonHeapMax);
- out.writeVLong(directMemoryMax);
- }
}
}
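
The JvmInfo hunk above is the template for the rest of this commit's Streamable-to-Writeable migration: mutable fields that readFrom() used to overwrite become final fields, set either by a regular constructor or by a constructor taking a StreamInput, and that constructor must mirror writeTo() field-for-field. A minimal sketch of the shape (ExampleInfo is a hypothetical class, not part of this commit; the stream types are the Elasticsearch ones assumed on the classpath):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    public class ExampleInfo implements Writeable {
        private final long pid;          // final: no setters, no readFrom()
        private final String version;

        public ExampleInfo(long pid, String version) {
            this.pid = pid;
            this.version = version;
        }

        // Deserializing constructor: read order must mirror writeTo() exactly.
        public ExampleInfo(StreamInput in) throws IOException {
            pid = in.readLong();
            version = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeLong(pid);
            out.writeString(version);
        }
    }
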
diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java
index 353b417825..5de18ddeec 100644
--- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java
+++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java
@@ -21,7 +21,7 @@ package org.elasticsearch.monitor.jvm;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -39,14 +39,12 @@ import java.lang.management.RuntimeMXBean;
import java.lang.management.ThreadMXBean;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeUnit;
-/**
- *
- */
-public class JvmStats implements Streamable, ToXContent {
+public class JvmStats implements Writeable, ToXContent {
private static final RuntimeMXBean runtimeMXBean;
private static final MemoryMXBean memoryMXBean;
@@ -61,21 +59,17 @@ public class JvmStats implements Streamable, ToXContent {
}
public static JvmStats jvmStats() {
- JvmStats stats = new JvmStats(System.currentTimeMillis(), runtimeMXBean.getUptime());
- stats.mem = new Mem();
MemoryUsage memUsage = memoryMXBean.getHeapMemoryUsage();
- stats.mem.heapUsed = memUsage.getUsed() < 0 ? 0 : memUsage.getUsed();
- stats.mem.heapCommitted = memUsage.getCommitted() < 0 ? 0 : memUsage.getCommitted();
- stats.mem.heapMax = memUsage.getMax() < 0 ? 0 : memUsage.getMax();
+ long heapUsed = memUsage.getUsed() < 0 ? 0 : memUsage.getUsed();
+ long heapCommitted = memUsage.getCommitted() < 0 ? 0 : memUsage.getCommitted();
+ long heapMax = memUsage.getMax() < 0 ? 0 : memUsage.getMax();
memUsage = memoryMXBean.getNonHeapMemoryUsage();
- stats.mem.nonHeapUsed = memUsage.getUsed() < 0 ? 0 : memUsage.getUsed();
- stats.mem.nonHeapCommitted = memUsage.getCommitted() < 0 ? 0 : memUsage.getCommitted();
-
+ long nonHeapUsed = memUsage.getUsed() < 0 ? 0 : memUsage.getUsed();
+ long nonHeapCommitted = memUsage.getCommitted() < 0 ? 0 : memUsage.getCommitted();
List<MemoryPoolMXBean> memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans();
List<MemoryPool> pools = new ArrayList<>();
- for (int i = 0; i < memoryPoolMXBeans.size(); i++) {
+ for (MemoryPoolMXBean memoryPoolMXBean : memoryPoolMXBeans) {
try {
- MemoryPoolMXBean memoryPoolMXBean = memoryPoolMXBeans.get(i);
MemoryUsage usage = memoryPoolMXBean.getUsage();
MemoryUsage peakUsage = memoryPoolMXBean.getPeakUsage();
String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), null);
@@ -94,55 +88,74 @@ public class JvmStats implements Streamable, ToXContent {
* we just omit the pool in that case! */
}
}
- stats.mem.pools = pools.toArray(new MemoryPool[pools.size()]);
-
- stats.threads = new Threads();
- stats.threads.count = threadMXBean.getThreadCount();
- stats.threads.peakCount = threadMXBean.getPeakThreadCount();
+ Mem mem = new Mem(heapCommitted, heapUsed, heapMax, nonHeapCommitted, nonHeapUsed, Collections.unmodifiableList(pools));
+ Threads threads = new Threads(threadMXBean.getThreadCount(), threadMXBean.getPeakThreadCount());
List<GarbageCollectorMXBean> gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans();
- stats.gc = new GarbageCollectors();
- stats.gc.collectors = new GarbageCollector[gcMxBeans.size()];
- for (int i = 0; i < stats.gc.collectors.length; i++) {
+ GarbageCollector[] collectors = new GarbageCollector[gcMxBeans.size()];
+ for (int i = 0; i < collectors.length; i++) {
GarbageCollectorMXBean gcMxBean = gcMxBeans.get(i);
- stats.gc.collectors[i] = new GarbageCollector();
- stats.gc.collectors[i].name = GcNames.getByGcName(gcMxBean.getName(), gcMxBean.getName());
- stats.gc.collectors[i].collectionCount = gcMxBean.getCollectionCount();
- stats.gc.collectors[i].collectionTime = gcMxBean.getCollectionTime();
+ collectors[i] = new GarbageCollector(GcNames.getByGcName(gcMxBean.getName(), gcMxBean.getName()),
+ gcMxBean.getCollectionCount(), gcMxBean.getCollectionTime());
}
-
+ GarbageCollectors garbageCollectors = new GarbageCollectors(collectors);
+ List<BufferPool> bufferPoolsList = Collections.emptyList();
try {
List<BufferPoolMXBean> bufferPools = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class);
- stats.bufferPools = new ArrayList<>(bufferPools.size());
+ bufferPoolsList = new ArrayList<>(bufferPools.size());
for (BufferPoolMXBean bufferPool : bufferPools) {
- stats.bufferPools.add(new BufferPool(bufferPool.getName(), bufferPool.getCount(), bufferPool.getTotalCapacity(), bufferPool.getMemoryUsed()));
+ bufferPoolsList.add(new BufferPool(bufferPool.getName(), bufferPool.getCount(),
+ bufferPool.getTotalCapacity(), bufferPool.getMemoryUsed()));
}
} catch (Exception e) {
// buffer pools are not available
}
- stats.classes = new Classes();
- stats.classes.loadedClassCount = classLoadingMXBean.getLoadedClassCount();
- stats.classes.totalLoadedClassCount = classLoadingMXBean.getTotalLoadedClassCount();
- stats.classes.unloadedClassCount = classLoadingMXBean.getUnloadedClassCount();
+ Classes classes = new Classes(classLoadingMXBean.getLoadedClassCount(), classLoadingMXBean.getTotalLoadedClassCount(),
+ classLoadingMXBean.getUnloadedClassCount());
- return stats;
+ return new JvmStats(System.currentTimeMillis(), runtimeMXBean.getUptime(), mem, threads,
+ garbageCollectors, bufferPoolsList, classes);
}
- long timestamp = -1;
- long uptime;
- Mem mem;
- Threads threads;
- GarbageCollectors gc;
- List<BufferPool> bufferPools;
- Classes classes;
-
- private JvmStats() {
- }
+ private final long timestamp;
+ private final long uptime;
+ private final Mem mem;
+ private final Threads threads;
+ private final GarbageCollectors gc;
+ private final List<BufferPool> bufferPools;
+ private final Classes classes;
- public JvmStats(long timestamp, long uptime) {
+ public JvmStats(long timestamp, long uptime, Mem mem, Threads threads, GarbageCollectors gc,
+ List<BufferPool> bufferPools, Classes classes) {
this.timestamp = timestamp;
this.uptime = uptime;
+ this.mem = mem;
+ this.threads = threads;
+ this.gc = gc;
+ this.bufferPools = bufferPools;
+ this.classes = classes;
+ }
+
+ public JvmStats(StreamInput in) throws IOException {
+ timestamp = in.readVLong();
+ uptime = in.readVLong();
+ mem = new Mem(in);
+ threads = new Threads(in);
+ gc = new GarbageCollectors(in);
+ bufferPools = in.readList(BufferPool::new);
+ classes = new Classes(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ out.writeVLong(uptime);
+ mem.writeTo(out);
+ threads.writeTo(out);
+ gc.writeTo(out);
+ out.writeList(bufferPools);
+ classes.writeTo(out);
}
public long getTimestamp() {
@@ -178,53 +191,50 @@ public class JvmStats implements Streamable, ToXContent {
builder.startObject(Fields.JVM);
builder.field(Fields.TIMESTAMP, timestamp);
builder.timeValueField(Fields.UPTIME_IN_MILLIS, Fields.UPTIME, uptime);
- if (mem != null) {
- builder.startObject(Fields.MEM);
- builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, mem.heapUsed);
- if (mem.getHeapUsedPercent() >= 0) {
- builder.field(Fields.HEAP_USED_PERCENT, mem.getHeapUsedPercent());
- }
- builder.byteSizeField(Fields.HEAP_COMMITTED_IN_BYTES, Fields.HEAP_COMMITTED, mem.heapCommitted);
- builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax);
- builder.byteSizeField(Fields.NON_HEAP_USED_IN_BYTES, Fields.NON_HEAP_USED, mem.nonHeapUsed);
- builder.byteSizeField(Fields.NON_HEAP_COMMITTED_IN_BYTES, Fields.NON_HEAP_COMMITTED, mem.nonHeapCommitted);
+ builder.startObject(Fields.MEM);
- builder.startObject(Fields.POOLS);
- for (MemoryPool pool : mem) {
- builder.startObject(pool.getName());
- builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, pool.used);
- builder.byteSizeField(Fields.MAX_IN_BYTES, Fields.MAX, pool.max);
+ builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, mem.heapUsed);
+ if (mem.getHeapUsedPercent() >= 0) {
+ builder.field(Fields.HEAP_USED_PERCENT, mem.getHeapUsedPercent());
+ }
+ builder.byteSizeField(Fields.HEAP_COMMITTED_IN_BYTES, Fields.HEAP_COMMITTED, mem.heapCommitted);
+ builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax);
+ builder.byteSizeField(Fields.NON_HEAP_USED_IN_BYTES, Fields.NON_HEAP_USED, mem.nonHeapUsed);
+ builder.byteSizeField(Fields.NON_HEAP_COMMITTED_IN_BYTES, Fields.NON_HEAP_COMMITTED, mem.nonHeapCommitted);
- builder.byteSizeField(Fields.PEAK_USED_IN_BYTES, Fields.PEAK_USED, pool.peakUsed);
- builder.byteSizeField(Fields.PEAK_MAX_IN_BYTES, Fields.PEAK_MAX, pool.peakMax);
+ builder.startObject(Fields.POOLS);
+ for (MemoryPool pool : mem) {
+ builder.startObject(pool.getName());
+ builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, pool.used);
+ builder.byteSizeField(Fields.MAX_IN_BYTES, Fields.MAX, pool.max);
- builder.endObject();
- }
- builder.endObject();
+ builder.byteSizeField(Fields.PEAK_USED_IN_BYTES, Fields.PEAK_USED, pool.peakUsed);
+ builder.byteSizeField(Fields.PEAK_MAX_IN_BYTES, Fields.PEAK_MAX, pool.peakMax);
builder.endObject();
}
- if (threads != null) {
- builder.startObject(Fields.THREADS);
- builder.field(Fields.COUNT, threads.getCount());
- builder.field(Fields.PEAK_COUNT, threads.getPeakCount());
- builder.endObject();
- }
- if (gc != null) {
- builder.startObject(Fields.GC);
+ builder.endObject();
- builder.startObject(Fields.COLLECTORS);
- for (GarbageCollector collector : gc) {
- builder.startObject(collector.getName());
- builder.field(Fields.COLLECTION_COUNT, collector.getCollectionCount());
- builder.timeValueField(Fields.COLLECTION_TIME_IN_MILLIS, Fields.COLLECTION_TIME, collector.collectionTime);
- builder.endObject();
- }
- builder.endObject();
+ builder.endObject();
+ builder.startObject(Fields.THREADS);
+ builder.field(Fields.COUNT, threads.getCount());
+ builder.field(Fields.PEAK_COUNT, threads.getPeakCount());
+ builder.endObject();
+
+ builder.startObject(Fields.GC);
+
+ builder.startObject(Fields.COLLECTORS);
+ for (GarbageCollector collector : gc) {
+ builder.startObject(collector.getName());
+ builder.field(Fields.COLLECTION_COUNT, collector.getCollectionCount());
+ builder.timeValueField(Fields.COLLECTION_TIME_IN_MILLIS, Fields.COLLECTION_TIME, collector.collectionTime);
builder.endObject();
}
+ builder.endObject();
+
+ builder.endObject();
if (bufferPools != null) {
builder.startObject(Fields.BUFFER_POOLS);
@@ -238,13 +248,11 @@ public class JvmStats implements Streamable, ToXContent {
builder.endObject();
}
- if (classes != null) {
- builder.startObject(Fields.CLASSES);
- builder.field(Fields.CURRENT_LOADED_COUNT, classes.getLoadedClassCount());
- builder.field(Fields.TOTAL_LOADED_COUNT, classes.getTotalLoadedClassCount());
- builder.field(Fields.TOTAL_UNLOADED_COUNT, classes.getUnloadedClassCount());
- builder.endObject();
- }
+ builder.startObject(Fields.CLASSES);
+ builder.field(Fields.CURRENT_LOADED_COUNT, classes.getLoadedClassCount());
+ builder.field(Fields.TOTAL_LOADED_COUNT, classes.getTotalLoadedClassCount());
+ builder.field(Fields.TOTAL_UNLOADED_COUNT, classes.getUnloadedClassCount());
+ builder.endObject();
builder.endObject();
return builder;
@@ -291,7 +299,6 @@ public class JvmStats implements Streamable, ToXContent {
static final String COLLECTION_TIME_IN_MILLIS = "collection_time_in_millis";
static final String BUFFER_POOLS = "buffer_pools";
- static final String NAME = "name";
static final String TOTAL_CAPACITY = "total_capacity";
static final String TOTAL_CAPACITY_IN_BYTES = "total_capacity_in_bytes";
@@ -301,80 +308,21 @@ public class JvmStats implements Streamable, ToXContent {
static final String TOTAL_UNLOADED_COUNT = "total_unloaded_count";
}
+ public static class GarbageCollectors implements Writeable, Iterable<GarbageCollector> {
- public static JvmStats readJvmStats(StreamInput in) throws IOException {
- JvmStats jvmStats = new JvmStats();
- jvmStats.readFrom(in);
- return jvmStats;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- timestamp = in.readVLong();
- uptime = in.readVLong();
-
- mem = Mem.readMem(in);
- threads = Threads.readThreads(in);
- gc = GarbageCollectors.readGarbageCollectors(in);
-
- if (in.readBoolean()) {
- int size = in.readVInt();
- bufferPools = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- BufferPool bufferPool = new BufferPool();
- bufferPool.readFrom(in);
- bufferPools.add(bufferPool);
- }
- }
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVLong(timestamp);
- out.writeVLong(uptime);
-
- mem.writeTo(out);
- threads.writeTo(out);
- gc.writeTo(out);
+ private final GarbageCollector[] collectors;
- if (bufferPools == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- out.writeVInt(bufferPools.size());
- for (BufferPool bufferPool : bufferPools) {
- bufferPool.writeTo(out);
- }
+ public GarbageCollectors(GarbageCollector[] collectors) {
+ this.collectors = collectors;
}
- }
-
- public static class GarbageCollectors implements Streamable, Iterable<GarbageCollector> {
- GarbageCollector[] collectors;
-
- GarbageCollectors() {
- }
-
- public static GarbageCollectors readGarbageCollectors(StreamInput in) throws IOException {
- GarbageCollectors collectors = new GarbageCollectors();
- collectors.readFrom(in);
- return collectors;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- collectors = new GarbageCollector[in.readVInt()];
- for (int i = 0; i < collectors.length; i++) {
- collectors[i] = GarbageCollector.readGarbageCollector(in);
- }
+ public GarbageCollectors(StreamInput in) throws IOException {
+ collectors = in.readArray(GarbageCollector::new, GarbageCollector[]::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(collectors.length);
- for (GarbageCollector gc : collectors) {
- gc.writeTo(out);
- }
+ out.writeArray(collectors);
}
public GarbageCollector[] getCollectors() {
@@ -387,23 +335,19 @@ public class JvmStats implements Streamable, ToXContent {
}
}
- public static class GarbageCollector implements Streamable {
-
- String name;
- long collectionCount;
- long collectionTime;
+ public static class GarbageCollector implements Writeable {
- GarbageCollector() {
- }
+ private final String name;
+ private final long collectionCount;
+ private final long collectionTime;
- public static GarbageCollector readGarbageCollector(StreamInput in) throws IOException {
- GarbageCollector gc = new GarbageCollector();
- gc.readFrom(in);
- return gc;
+ public GarbageCollector(String name, long collectionCount, long collectionTime) {
+ this.name = name;
+ this.collectionCount = collectionCount;
+ this.collectionTime = collectionTime;
}
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public GarbageCollector(StreamInput in) throws IOException {
name = in.readString();
collectionCount = in.readVLong();
collectionTime = in.readVLong();
@@ -429,30 +373,17 @@ public class JvmStats implements Streamable, ToXContent {
}
}
- public static class Threads implements Streamable {
+ public static class Threads implements Writeable {
- int count;
- int peakCount;
+ private final int count;
+ private final int peakCount;
- Threads() {
- }
-
- public int getCount() {
- return count;
- }
-
- public int getPeakCount() {
- return peakCount;
- }
-
- public static Threads readThreads(StreamInput in) throws IOException {
- Threads threads = new Threads();
- threads.readFrom(in);
- return threads;
+ public Threads(int count, int peakCount) {
+ this.count = count;
+ this.peakCount = peakCount;
}
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public Threads(StreamInput in) throws IOException {
count = in.readVInt();
peakCount = in.readVInt();
}
@@ -462,20 +393,23 @@ public class JvmStats implements Streamable, ToXContent {
out.writeVInt(count);
out.writeVInt(peakCount);
}
- }
- public static class MemoryPool implements Streamable {
-
- String name;
- long used;
- long max;
+ public int getCount() {
+ return count;
+ }
- long peakUsed;
- long peakMax;
+ public int getPeakCount() {
+ return peakCount;
+ }
+ }
- MemoryPool() {
+ public static class MemoryPool implements Writeable {
- }
+ private final String name;
+ private final long used;
+ private final long max;
+ private final long peakUsed;
+ private final long peakMax;
public MemoryPool(String name, long used, long max, long peakUsed, long peakMax) {
this.name = name;
@@ -485,10 +419,21 @@ public class JvmStats implements Streamable, ToXContent {
this.peakMax = peakMax;
}
- public static MemoryPool readMemoryPool(StreamInput in) throws IOException {
- MemoryPool pool = new MemoryPool();
- pool.readFrom(in);
- return pool;
+ public MemoryPool(StreamInput in) throws IOException {
+ name = in.readString();
+ used = in.readVLong();
+ max = in.readVLong();
+ peakUsed = in.readVLong();
+ peakMax = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVLong(used);
+ out.writeVLong(max);
+ out.writeVLong(peakUsed);
+ out.writeVLong(peakMax);
}
public String getName() {
@@ -510,61 +455,33 @@ public class JvmStats implements Streamable, ToXContent {
public ByteSizeValue getPeakMax() {
return new ByteSizeValue(peakMax);
}
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- name = in.readString();
- used = in.readVLong();
- max = in.readVLong();
- peakUsed = in.readVLong();
- peakMax = in.readVLong();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(name);
- out.writeVLong(used);
- out.writeVLong(max);
- out.writeVLong(peakUsed);
- out.writeVLong(peakMax);
- }
}
- public static class Mem implements Streamable, Iterable<MemoryPool> {
-
- long heapCommitted;
- long heapUsed;
- long heapMax;
- long nonHeapCommitted;
- long nonHeapUsed;
+ public static class Mem implements Writeable, Iterable<MemoryPool> {
- MemoryPool[] pools = new MemoryPool[0];
+ private final long heapCommitted;
+ private final long heapUsed;
+ private final long heapMax;
+ private final long nonHeapCommitted;
+ private final long nonHeapUsed;
+ private final List<MemoryPool> pools;
- Mem() {
+ public Mem(long heapCommitted, long heapUsed, long heapMax, long nonHeapCommitted, long nonHeapUsed, List<MemoryPool> pools) {
+ this.heapCommitted = heapCommitted;
+ this.heapUsed = heapUsed;
+ this.heapMax = heapMax;
+ this.nonHeapCommitted = nonHeapCommitted;
+ this.nonHeapUsed = nonHeapUsed;
+ this.pools = pools;
}
- public static Mem readMem(StreamInput in) throws IOException {
- Mem mem = new Mem();
- mem.readFrom(in);
- return mem;
- }
-
- @Override
- public Iterator<MemoryPool> iterator() {
- return Arrays.stream(pools).iterator();
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public Mem(StreamInput in) throws IOException {
heapCommitted = in.readVLong();
heapUsed = in.readVLong();
nonHeapCommitted = in.readVLong();
nonHeapUsed = in.readVLong();
heapMax = in.readVLong();
- pools = new MemoryPool[in.readVInt()];
- for (int i = 0; i < pools.length; i++) {
- pools[i] = MemoryPool.readMemoryPool(in);
- }
+ pools = in.readList(MemoryPool::new);
}
@Override
@@ -574,10 +491,12 @@ public class JvmStats implements Streamable, ToXContent {
out.writeVLong(nonHeapCommitted);
out.writeVLong(nonHeapUsed);
out.writeVLong(heapMax);
- out.writeVInt(pools.length);
- for (MemoryPool pool : pools) {
- pool.writeTo(out);
- }
+ out.writeList(pools);
+ }
+
+ @Override
+ public Iterator<MemoryPool> iterator() {
+ return pools.iterator();
}
public ByteSizeValue getHeapCommitted() {
@@ -614,15 +533,12 @@ public class JvmStats implements Streamable, ToXContent {
}
}
- public static class BufferPool implements Streamable {
-
- String name;
- long count;
- long totalCapacity;
- long used;
+ public static class BufferPool implements Writeable {
- BufferPool() {
- }
+ private final String name;
+ private final long count;
+ private final long totalCapacity;
+ private final long used;
public BufferPool(String name, long count, long totalCapacity, long used) {
this.name = name;
@@ -631,6 +547,21 @@ public class JvmStats implements Streamable, ToXContent {
this.used = used;
}
+ public BufferPool(StreamInput in) throws IOException {
+ name = in.readString();
+ count = in.readLong();
+ totalCapacity = in.readLong();
+ used = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeLong(count);
+ out.writeLong(totalCapacity);
+ out.writeLong(used);
+ }
+
public String getName() {
return this.name;
}
@@ -646,32 +577,13 @@ public class JvmStats implements Streamable, ToXContent {
public ByteSizeValue getUsed() {
return new ByteSizeValue(used);
}
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- name = in.readString();
- count = in.readLong();
- totalCapacity = in.readLong();
- used = in.readLong();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(name);
- out.writeLong(count);
- out.writeLong(totalCapacity);
- out.writeLong(used);
- }
}
- public static class Classes implements Streamable {
-
- long loadedClassCount;
- long totalLoadedClassCount;
- long unloadedClassCount;
+ public static class Classes implements Writeable {
- Classes() {
- }
+ private final long loadedClassCount;
+ private final long totalLoadedClassCount;
+ private final long unloadedClassCount;
public Classes(long loadedClassCount, long totalLoadedClassCount, long unloadedClassCount) {
this.loadedClassCount = loadedClassCount;
@@ -679,20 +591,7 @@ public class JvmStats implements Streamable, ToXContent {
this.unloadedClassCount = unloadedClassCount;
}
- public long getLoadedClassCount() {
- return loadedClassCount;
- }
-
- public long getTotalLoadedClassCount() {
- return totalLoadedClassCount;
- }
-
- public long getUnloadedClassCount() {
- return unloadedClassCount;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public Classes(StreamInput in) throws IOException {
loadedClassCount = in.readLong();
totalLoadedClassCount = in.readLong();
unloadedClassCount = in.readLong();
@@ -704,5 +603,17 @@ public class JvmStats implements Streamable, ToXContent {
out.writeLong(totalLoadedClassCount);
out.writeLong(unloadedClassCount);
}
+
+ public long getLoadedClassCount() {
+ return loadedClassCount;
+ }
+
+ public long getTotalLoadedClassCount() {
+ return totalLoadedClassCount;
+ }
+
+ public long getUnloadedClassCount() {
+ return unloadedClassCount;
+ }
}
}
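
The hand-rolled length-prefixed loops removed from JvmStats above give way to StreamInput.readList/readArray and StreamOutput.writeList/writeArray. Roughly what those helpers expand to under the 5.x stream conventions (the method names below are illustrative, not real API):

    // Writing: a vint length prefix, then each element's writeTo().
    static void writePools(StreamOutput out, List<MemoryPool> pools) throws IOException {
        out.writeVInt(pools.size());
        for (MemoryPool pool : pools) {
            pool.writeTo(out);
        }
    }

    // Reading: the same prefix, then one reader call per element, which is
    // why the StreamInput constructor reference MemoryPool::new can be
    // passed straight to in.readList(...).
    static List<MemoryPool> readPools(StreamInput in) throws IOException {
        int size = in.readVInt();
        List<MemoryPool> pools = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            pools.add(new MemoryPool(in));
        }
        return pools;
    }
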
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java
index 599755e78a..af6ea85180 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java
@@ -21,13 +21,8 @@ package org.elasticsearch.monitor.os;
public class DummyOsInfo extends OsInfo {
- DummyOsInfo() {
- refreshInterval = 0;
- availableProcessors = 0;
- allocatedProcessors = 0;
- name = "dummy_name";
- arch = "dummy_arch";
- version = "dummy_version";
+ private DummyOsInfo() {
+ super(0, 0, 0, "dummy_name", "dummy_arch", "dummy_version");
}
public static final DummyOsInfo INSTANCE = new DummyOsInfo();
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
index f052035852..7a0175c31d 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
@@ -21,25 +21,47 @@ package org.elasticsearch.monitor.os;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-public class OsInfo implements Streamable, ToXContent {
-
- long refreshInterval;
-
- int availableProcessors;
-
- int allocatedProcessors;
+public class OsInfo implements Writeable, ToXContent {
+
+ private final long refreshInterval;
+ private final int availableProcessors;
+ private final int allocatedProcessors;
+ private final String name;
+ private final String arch;
+ private final String version;
+
+ public OsInfo(long refreshInterval, int availableProcessors, int allocatedProcessors, String name, String arch, String version) {
+ this.refreshInterval = refreshInterval;
+ this.availableProcessors = availableProcessors;
+ this.allocatedProcessors = allocatedProcessors;
+ this.name = name;
+ this.arch = arch;
+ this.version = version;
+ }
- String name = null;
- String arch = null;
- String version = null;
+ public OsInfo(StreamInput in) throws IOException {
+ this.refreshInterval = in.readLong();
+ this.availableProcessors = in.readInt();
+ this.allocatedProcessors = in.readInt();
+ this.name = in.readOptionalString();
+ this.arch = in.readOptionalString();
+ this.version = in.readOptionalString();
+ }
- OsInfo() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(refreshInterval);
+ out.writeInt(availableProcessors);
+ out.writeInt(allocatedProcessors);
+ out.writeOptionalString(name);
+ out.writeOptionalString(arch);
+ out.writeOptionalString(version);
}
public long getRefreshInterval() {
@@ -95,30 +117,4 @@ public class OsInfo implements Streamable, ToXContent {
builder.endObject();
return builder;
}
-
- public static OsInfo readOsInfo(StreamInput in) throws IOException {
- OsInfo info = new OsInfo();
- info.readFrom(in);
- return info;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- refreshInterval = in.readLong();
- availableProcessors = in.readInt();
- allocatedProcessors = in.readInt();
- name = in.readOptionalString();
- arch = in.readOptionalString();
- version = in.readOptionalString();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeLong(refreshInterval);
- out.writeInt(availableProcessors);
- out.writeInt(allocatedProcessors);
- out.writeOptionalString(name);
- out.writeOptionalString(arch);
- out.writeOptionalString(version);
- }
}
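
OsInfo's name, arch, and version can be null, so they go through the optional-string helpers. As assumed here, the wire format is a one-byte presence flag followed by the value; the hand-written equivalent would be:

    // Sketch of out.writeOptionalString(name) / in.readOptionalString()
    // (the real helpers live on StreamOutput/StreamInput).
    static void writeOptionalString(StreamOutput out, String value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeString(value);
        }
    }

    static String readOptionalString(StreamInput in) throws IOException {
        return in.readBoolean() ? in.readString() : null;
    }
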
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
index 017f961a31..08abfc05f1 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
@@ -163,33 +163,16 @@ public class OsProbe {
private OsProbe() {
}
- public OsInfo osInfo() {
- OsInfo info = new OsInfo();
- info.availableProcessors = Runtime.getRuntime().availableProcessors();
- info.name = Constants.OS_NAME;
- info.arch = Constants.OS_ARCH;
- info.version = Constants.OS_VERSION;
- return info;
+ public OsInfo osInfo(long refreshInterval, int allocatedProcessors) {
+ return new OsInfo(refreshInterval, Runtime.getRuntime().availableProcessors(),
+ allocatedProcessors, Constants.OS_NAME, Constants.OS_ARCH, Constants.OS_VERSION);
}
public OsStats osStats() {
- OsStats stats = new OsStats();
- stats.timestamp = System.currentTimeMillis();
- stats.cpu = new OsStats.Cpu();
- stats.cpu.percent = getSystemCpuPercent();
- stats.cpu.loadAverage = getSystemLoadAverage();
-
- OsStats.Mem mem = new OsStats.Mem();
- mem.total = getTotalPhysicalMemorySize();
- mem.free = getFreePhysicalMemorySize();
- stats.mem = mem;
-
- OsStats.Swap swap = new OsStats.Swap();
- swap.total = getTotalSwapSpaceSize();
- swap.free = getFreeSwapSpaceSize();
- stats.swap = swap;
-
- return stats;
+ OsStats.Cpu cpu = new OsStats.Cpu(getSystemCpuPercent(), getSystemLoadAverage());
+ OsStats.Mem mem = new OsStats.Mem(getTotalPhysicalMemorySize(), getFreePhysicalMemorySize());
+ OsStats.Swap swap = new OsStats.Swap(getTotalSwapSpaceSize(), getFreeSwapSpaceSize());
+ return new OsStats(System.currentTimeMillis(), cpu, mem, swap);
}
/**
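
With osInfo() and osStats() now returning fully constructed, immutable objects, a caller takes a snapshot in one call instead of patching fields afterwards. A hypothetical usage sketch mirroring what OsService does below (the literal values are illustrative; types come from org.elasticsearch.monitor.os):

    OsProbe probe = OsProbe.getInstance();
    // refresh interval and allocated processors are supplied by the caller
    // now, instead of being written onto a mutable OsInfo after the fact.
    OsInfo info = probe.osInfo(1000L, Runtime.getRuntime().availableProcessors());
    OsStats stats = probe.osStats();
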
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java
index 248b49f21c..cb67eef852 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java
@@ -27,32 +27,22 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.SingleObjectCache;
import org.elasticsearch.common.util.concurrent.EsExecutors;
-/**
- *
- */
public class OsService extends AbstractComponent {
private final OsProbe probe;
-
private final OsInfo info;
-
- private SingleObjectCache<OsStats> osStatsCache;
+ private final SingleObjectCache<OsStats> osStatsCache;
public static final Setting<TimeValue> REFRESH_INTERVAL_SETTING =
Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1),
- Property.NodeScope);
+ Property.NodeScope);
public OsService(Settings settings) {
super(settings);
this.probe = OsProbe.getInstance();
-
TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
-
- this.info = probe.osInfo();
- this.info.refreshInterval = refreshInterval.millis();
- this.info.allocatedProcessors = EsExecutors.boundedNumberOfProcessors(settings);
-
- osStatsCache = new OsStatsCache(refreshInterval, probe.osStats());
+ this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.boundedNumberOfProcessors(settings));
+ this.osStatsCache = new OsStatsCache(refreshInterval, probe.osStats());
logger.debug("using refresh_interval [{}]", refreshInterval);
}
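
The OsStatsCache used here extends SingleObjectCache, which hands back a cached OsStats until the refresh interval elapses and then re-probes. A minimal re-implementation of the idea, assuming nothing beyond the JDK (this is not the Elasticsearch class):

    import java.util.function.Supplier;

    class RefreshCache<T> {
        private final long intervalMillis;
        private final Supplier<T> probe;
        private volatile T cached;
        private volatile long lastRefresh;

        RefreshCache(long intervalMillis, Supplier<T> probe, T initial) {
            this.intervalMillis = intervalMillis;
            this.probe = probe;
            this.cached = initial;
            this.lastRefresh = System.currentTimeMillis();
        }

        T get() {
            long now = System.currentTimeMillis();
            if (now - lastRefresh > intervalMillis) {
                cached = probe.get();   // benign race: worst case is an extra probe
                lastRefresh = now;
            }
            return cached;
        }
    }
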
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java
index 51302d7ae6..e07b92a6cb 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java
@@ -21,28 +21,42 @@ package org.elasticsearch.monitor.os;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Objects;
-/**
- *
- */
-public class OsStats implements Streamable, ToXContent {
-
- long timestamp;
+public class OsStats implements Writeable, ToXContent {
- Cpu cpu = null;
+ private final long timestamp;
+ private final Cpu cpu;
+ private final Mem mem;
+ private final Swap swap;
- Mem mem = null;
+ public OsStats(long timestamp, Cpu cpu, Mem mem, Swap swap) {
+ this.timestamp = timestamp;
+ this.cpu = Objects.requireNonNull(cpu, "cpu must not be null");
+ this.mem = Objects.requireNonNull(mem, "mem must not be null");
+ this.swap = Objects.requireNonNull(swap, "swap must not be null");
+ }
- Swap swap = null;
+ public OsStats(StreamInput in) throws IOException {
+ this.timestamp = in.readVLong();
+ this.cpu = new Cpu(in);
+ this.mem = new Mem(in);
+ this.swap = new Swap(in);
+ }
- OsStats() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ cpu.writeTo(out);
+ mem.writeTo(out);
+ swap.writeTo(out);
}
public long getTimestamp() {
@@ -65,9 +79,9 @@ public class OsStats implements Streamable, ToXContent {
static final String CPU = "cpu";
static final String PERCENT = "percent";
static final String LOAD_AVERAGE = "load_average";
- static final String LOAD_AVERAGE_1M = new String("1m");
- static final String LOAD_AVERAGE_5M = new String("5m");
- static final String LOAD_AVERAGE_15M = new String("15m");
+ static final String LOAD_AVERAGE_1M = "1m";
+ static final String LOAD_AVERAGE_5M = "5m";
+ static final String LOAD_AVERAGE_15M = "15m";
static final String MEM = "mem";
static final String SWAP = "swap";
@@ -86,105 +100,29 @@ public class OsStats implements Streamable, ToXContent {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.OS);
builder.field(Fields.TIMESTAMP, getTimestamp());
- if (cpu != null) {
- builder.startObject(Fields.CPU);
- builder.field(Fields.PERCENT, cpu.getPercent());
- if (cpu.getLoadAverage() != null && Arrays.stream(cpu.getLoadAverage()).anyMatch(load -> load != -1)) {
- builder.startObject(Fields.LOAD_AVERAGE);
- if (cpu.getLoadAverage()[0] != -1) {
- builder.field(Fields.LOAD_AVERAGE_1M, cpu.getLoadAverage()[0]);
- }
- if (cpu.getLoadAverage()[1] != -1) {
- builder.field(Fields.LOAD_AVERAGE_5M, cpu.getLoadAverage()[1]);
- }
- if (cpu.getLoadAverage()[2] != -1) {
- builder.field(Fields.LOAD_AVERAGE_15M, cpu.getLoadAverage()[2]);
- }
- builder.endObject();
- }
- builder.endObject();
- }
-
- if (mem != null) {
- builder.startObject(Fields.MEM);
- builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, mem.getTotal());
- builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, mem.getFree());
- builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, mem.getUsed());
-
- builder.field(Fields.FREE_PERCENT, mem.getFreePercent());
- builder.field(Fields.USED_PERCENT, mem.getUsedPercent());
-
- builder.endObject();
- }
-
- if (swap != null) {
- builder.startObject(Fields.SWAP);
- builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, swap.getTotal());
- builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, swap.getFree());
- builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, swap.getUsed());
- builder.endObject();
- }
-
+ cpu.toXContent(builder, params);
+ mem.toXContent(builder, params);
+ swap.toXContent(builder, params);
builder.endObject();
return builder;
}
- public static OsStats readOsStats(StreamInput in) throws IOException {
- OsStats stats = new OsStats();
- stats.readFrom(in);
- return stats;
- }
+ public static class Cpu implements Writeable, ToXContent {
- @Override
- public void readFrom(StreamInput in) throws IOException {
- timestamp = in.readVLong();
- cpu = in.readOptionalStreamable(Cpu::new);
- if (in.readBoolean()) {
- mem = Mem.readMem(in);
- }
- if (in.readBoolean()) {
- swap = Swap.readSwap(in);
- }
- }
+ private final short percent;
+ private final double[] loadAverage;
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVLong(timestamp);
- out.writeOptionalStreamable(cpu);
- if (mem == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- mem.writeTo(out);
+ public Cpu(short systemCpuPercent, double[] systemLoadAverage) {
+ this.percent = systemCpuPercent;
+ this.loadAverage = systemLoadAverage;
}
- if (swap == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- swap.writeTo(out);
- }
- }
-
- public static class Cpu implements Streamable {
-
- short percent = -1;
- double[] loadAverage = null;
-
- Cpu() {}
- public static Cpu readCpu(StreamInput in) throws IOException {
- Cpu cpu = new Cpu();
- cpu.readFrom(in);
- return cpu;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- percent = in.readShort();
+ public Cpu(StreamInput in) throws IOException {
+ this.percent = in.readShort();
if (in.readBoolean()) {
- loadAverage = in.readDoubleArray();
+ this.loadAverage = in.readDoubleArray();
} else {
- loadAverage = null;
+ this.loadAverage = null;
}
}
@@ -206,12 +144,49 @@ public class OsStats implements Streamable, ToXContent {
public double[] getLoadAverage() {
return loadAverage;
}
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.CPU);
+ builder.field(Fields.PERCENT, getPercent());
+ if (getLoadAverage() != null && Arrays.stream(getLoadAverage()).anyMatch(load -> load != -1)) {
+ builder.startObject(Fields.LOAD_AVERAGE);
+ if (getLoadAverage()[0] != -1) {
+ builder.field(Fields.LOAD_AVERAGE_1M, getLoadAverage()[0]);
+ }
+ if (getLoadAverage()[1] != -1) {
+ builder.field(Fields.LOAD_AVERAGE_5M, getLoadAverage()[1]);
+ }
+ if (getLoadAverage()[2] != -1) {
+ builder.field(Fields.LOAD_AVERAGE_15M, getLoadAverage()[2]);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
}
- public static class Swap implements Streamable {
+ public static class Swap implements Writeable, ToXContent {
+
+ private final long total;
+ private final long free;
- long total = -1;
- long free = -1;
+ public Swap(long total, long free) {
+ this.total = total;
+ this.free = free;
+ }
+
+ public Swap(StreamInput in) throws IOException {
+ this.total = in.readLong();
+ this.free = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(total);
+ out.writeLong(free);
+ }
public ByteSizeValue getFree() {
return new ByteSizeValue(free);
@@ -225,40 +200,30 @@ public class OsStats implements Streamable, ToXContent {
return new ByteSizeValue(total);
}
- public static Swap readSwap(StreamInput in) throws IOException {
- Swap swap = new Swap();
- swap.readFrom(in);
- return swap;
- }
-
@Override
- public void readFrom(StreamInput in) throws IOException {
- total = in.readLong();
- free = in.readLong();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeLong(total);
- out.writeLong(free);
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.SWAP);
+ builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal());
+ builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, getFree());
+ builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, getUsed());
+ builder.endObject();
+ return builder;
}
}
- public static class Mem implements Streamable {
+ public static class Mem implements Writeable, ToXContent {
- long total = -1;
- long free = -1;
+ private final long total;
+ private final long free;
- public static Mem readMem(StreamInput in) throws IOException {
- Mem mem = new Mem();
- mem.readFrom(in);
- return mem;
+ public Mem(long total, long free) {
+ this.total = total;
+ this.free = free;
}
- @Override
- public void readFrom(StreamInput in) throws IOException {
- total = in.readLong();
- free = in.readLong();
+ public Mem(StreamInput in) throws IOException {
+ this.total = in.readLong();
+ this.free = in.readLong();
}
@Override
@@ -276,7 +241,7 @@ public class OsStats implements Streamable, ToXContent {
}
public short getUsedPercent() {
- return calculatePercentage(getUsed().bytes(), getTotal().bytes());
+ return calculatePercentage(getUsed().getBytes(), total);
}
public ByteSizeValue getFree() {
@@ -284,11 +249,23 @@ public class OsStats implements Streamable, ToXContent {
}
public short getFreePercent() {
- return calculatePercentage(getFree().bytes(), getTotal().bytes());
+ return calculatePercentage(free, total);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.MEM);
+ builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal());
+ builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, getFree());
+ builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, getUsed());
+ builder.field(Fields.FREE_PERCENT, getFreePercent());
+ builder.field(Fields.USED_PERCENT, getUsedPercent());
+ builder.endObject();
+ return builder;
}
}
- private static short calculatePercentage(long used, long max) {
+ public static short calculatePercentage(long used, long max) {
return max <= 0 ? 0 : (short) (Math.round((100d * used) / max));
}
}
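
The getUsedPercent()/getFreePercent() fixes above also stop round-tripping through ByteSizeValue when the raw byte counts are already at hand; calculatePercentage itself keeps the same formula, rounding 100 * used / max and guarding against max <= 0. A self-contained worked example:

    // Same formula as OsStats.calculatePercentage above.
    public class PercentExample {
        static short calculatePercentage(long used, long max) {
            return max <= 0 ? 0 : (short) Math.round((100d * used) / max);
        }

        public static void main(String[] args) {
            long used = 3L << 30, total = 8L << 30;               // 3 GiB of 8 GiB
            System.out.println(calculatePercentage(used, total)); // 38 (37.5 rounded)
            System.out.println(calculatePercentage(5, 0));        // 0 (max <= 0 guard)
        }
    }
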
diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java
index cf9c9e63b8..a0e3e7a70f 100644
--- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java
@@ -21,26 +21,35 @@ package org.elasticsearch.monitor.process;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-public class ProcessInfo implements Streamable, ToXContent {
+public class ProcessInfo implements Writeable, ToXContent {
- long refreshInterval;
+ private final long refreshInterval;
+ private final long id;
+ private final boolean mlockall;
- private long id;
-
- private boolean mlockall;
+ public ProcessInfo(long id, boolean mlockall, long refreshInterval) {
+ this.id = id;
+ this.mlockall = mlockall;
+ this.refreshInterval = refreshInterval;
+ }
- ProcessInfo() {
+ public ProcessInfo(StreamInput in) throws IOException {
+ refreshInterval = in.readLong();
+ id = in.readLong();
+ mlockall = in.readBoolean();
}
- public ProcessInfo(long id, boolean mlockall) {
- this.id = id;
- this.mlockall = mlockall;
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(refreshInterval);
+ out.writeLong(id);
+ out.writeBoolean(mlockall);
}
public long refreshInterval() {
@@ -79,24 +88,4 @@ public class ProcessInfo implements Streamable, ToXContent {
builder.endObject();
return builder;
}
-
- public static ProcessInfo readProcessInfo(StreamInput in) throws IOException {
- ProcessInfo info = new ProcessInfo();
- info.readFrom(in);
- return info;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- refreshInterval = in.readLong();
- id = in.readLong();
- mlockall = in.readBoolean();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeLong(refreshInterval);
- out.writeLong(id);
- out.writeBoolean(mlockall);
- }
}
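
One subtlety in ProcessInfo: the wire order (refreshInterval, id, mlockall) differs from the public constructor's parameter order (id, mlockall, refreshInterval). Consistency comes from the StreamInput constructor mirroring writeTo(), not the public constructor, which a round-trip exercise makes visible (a hypothetical check in the style of the project's serialization tests; BytesStreamOutput and BytesReference.streamInput() are assumed to be available):

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.monitor.process.ProcessInfo;

    public class ProcessInfoRoundTrip {
        public static void main(String[] args) throws Exception {
            BytesStreamOutput out = new BytesStreamOutput();
            new ProcessInfo(42L, true, 1000L).writeTo(out);
            // Deserialize through the StreamInput constructor added above.
            ProcessInfo copy = new ProcessInfo(out.bytes().streamInput());
            assert copy.refreshInterval() == 1000L;
        }
    }
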
diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java
index b19b54a947..c6434d2480 100644
--- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java
+++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java
@@ -126,26 +126,14 @@ public class ProcessProbe {
return -1;
}
- public ProcessInfo processInfo() {
- return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked());
+ public ProcessInfo processInfo(long refreshInterval) {
+ return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked(), refreshInterval);
}
public ProcessStats processStats() {
- ProcessStats stats = new ProcessStats();
- stats.timestamp = System.currentTimeMillis();
- stats.openFileDescriptors = getOpenFileDescriptorCount();
- stats.maxFileDescriptors = getMaxFileDescriptorCount();
-
- ProcessStats.Cpu cpu = new ProcessStats.Cpu();
- cpu.percent = getProcessCpuPercent();
- cpu.total = getProcessCpuTotalTime();
- stats.cpu = cpu;
-
- ProcessStats.Mem mem = new ProcessStats.Mem();
- mem.totalVirtual = getTotalVirtualMemorySize();
- stats.mem = mem;
-
- return stats;
+ ProcessStats.Cpu cpu = new ProcessStats.Cpu(getProcessCpuPercent(), getProcessCpuTotalTime());
+ ProcessStats.Mem mem = new ProcessStats.Mem(getTotalVirtualMemorySize());
+ return new ProcessStats(System.currentTimeMillis(), getOpenFileDescriptorCount(), getMaxFileDescriptorCount(), cpu, mem);
}
/**
diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java
index 3859353448..99593003b3 100644
--- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java
+++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java
@@ -42,11 +42,9 @@ public final class ProcessService extends AbstractComponent {
public ProcessService(Settings settings) {
super(settings);
this.probe = ProcessProbe.getInstance();
-
final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
processStatsCache = new ProcessStatsCache(refreshInterval, probe.processStats());
- this.info = probe.processInfo();
- this.info.refreshInterval = refreshInterval.millis();
+ this.info = probe.processInfo(refreshInterval.millis());
logger.debug("using refresh_interval [{}]", refreshInterval);
}
diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java
index 310cb215ae..2a9b895277 100644
--- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java
+++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java
@@ -21,7 +21,7 @@ package org.elasticsearch.monitor.process;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -29,18 +29,37 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-public class ProcessStats implements Streamable, ToXContent {
+public class ProcessStats implements Writeable, ToXContent {
- long timestamp = -1;
+ private final long timestamp;
+ private final long openFileDescriptors;
+ private final long maxFileDescriptors;
+ private final Cpu cpu;
+ private final Mem mem;
- long openFileDescriptors = -1;
- long maxFileDescriptors = -1;
-
- Cpu cpu = null;
+ public ProcessStats(long timestamp, long openFileDescriptors, long maxFileDescriptors, Cpu cpu, Mem mem) {
+ this.timestamp = timestamp;
+ this.openFileDescriptors = openFileDescriptors;
+ this.maxFileDescriptors = maxFileDescriptors;
+ this.cpu = cpu;
+ this.mem = mem;
+ }
- Mem mem = null;
+ public ProcessStats(StreamInput in) throws IOException {
+ timestamp = in.readVLong();
+ openFileDescriptors = in.readLong();
+ maxFileDescriptors = in.readLong();
+ cpu = in.readOptionalWriteable(Cpu::new);
+ mem = in.readOptionalWriteable(Mem::new);
+ }
- ProcessStats() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ out.writeLong(openFileDescriptors);
+ out.writeLong(maxFileDescriptors);
+ out.writeOptionalWriteable(cpu);
+ out.writeOptionalWriteable(mem);
}
public long getTimestamp() {
@@ -100,59 +119,15 @@ public class ProcessStats implements Streamable, ToXContent {
return builder;
}
- public static ProcessStats readProcessStats(StreamInput in) throws IOException {
- ProcessStats stats = new ProcessStats();
- stats.readFrom(in);
- return stats;
- }
+ public static class Mem implements Writeable {
- @Override
- public void readFrom(StreamInput in) throws IOException {
- timestamp = in.readVLong();
- openFileDescriptors = in.readLong();
- maxFileDescriptors = in.readLong();
- if (in.readBoolean()) {
- cpu = Cpu.readCpu(in);
- }
- if (in.readBoolean()) {
- mem = Mem.readMem(in);
- }
- }
+ private final long totalVirtual;
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVLong(timestamp);
- out.writeLong(openFileDescriptors);
- out.writeLong(maxFileDescriptors);
- if (cpu == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- cpu.writeTo(out);
- }
- if (mem == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- mem.writeTo(out);
- }
- }
-
- public static class Mem implements Streamable {
-
- long totalVirtual = -1;
-
- Mem() {
+ public Mem(long totalVirtual) {
+ this.totalVirtual = totalVirtual;
}
- public static Mem readMem(StreamInput in) throws IOException {
- Mem mem = new Mem();
- mem.readFrom(in);
- return mem;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public Mem(StreamInput in) throws IOException {
totalVirtual = in.readLong();
}
@@ -166,23 +141,17 @@ public class ProcessStats implements Streamable, ToXContent {
}
}
- public static class Cpu implements Streamable {
+ public static class Cpu implements Writeable {
- short percent = -1;
- long total = -1;
+ private final short percent;
+ private final long total;
- Cpu() {
-
- }
-
- public static Cpu readCpu(StreamInput in) throws IOException {
- Cpu cpu = new Cpu();
- cpu.readFrom(in);
- return cpu;
+ public Cpu(short percent, long total) {
+ this.percent = percent;
+ this.total = total;
}
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public Cpu(StreamInput in) throws IOException {
percent = in.readShort();
total = in.readLong();
}
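
The deleted readFrom/writeTo bodies show exactly what readOptionalWriteable and writeOptionalWriteable encapsulate: a boolean presence flag followed by the payload when the field is non-null. A self-contained sketch of that wire protocol, with a String payload standing in for the Cpu and Mem writeables:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

final class OptionalFieldSketch {
    static void writeOptionalString(DataOutput out, String value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);   // absent: flag only
        } else {
            out.writeBoolean(true);    // present: flag, then payload
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInput in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalString(out, null);
        writeOptionalString(out, "cpu");
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalString(in)); // null
        System.out.println(readOptionalString(in)); // cpu
    }
}
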
diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java
index 7b098323f7..2d03a057b5 100644
--- a/core/src/main/java/org/elasticsearch/node/Node.java
+++ b/core/src/main/java/org/elasticsearch/node/Node.java
@@ -19,6 +19,10 @@
package org.elasticsearch.node;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configurator;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
@@ -40,18 +44,20 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandRegistry;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Key;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
@@ -65,7 +71,6 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
@@ -74,6 +79,7 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.analysis.AnalysisRegistry;
@@ -84,6 +90,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
+import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.ingest.IngestService;
@@ -93,11 +100,13 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.AnalysisPlugin;
+import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.IngestPlugin;
import org.elasticsearch.plugins.MapperPlugin;
-import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.MetaDataUpgrader;
+import org.elasticsearch.plugins.NetworkPlugin;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.plugins.ScriptPlugin;
@@ -106,13 +115,18 @@ import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchExtRegistry;
import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.snapshots.SnapshotShardsService;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.tasks.TaskResultsService;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherService;
@@ -135,6 +149,7 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
@@ -214,7 +229,7 @@ public class Node implements Closeable {
boolean success = false;
{
// use temp logger just to say we are starting. we can't use it later on because the node name might not be set
- ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings()));
+ Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings()));
logger.info("initializing ...");
}
@@ -234,17 +249,16 @@ public class Node implements Closeable {
final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings);
tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeEnvironment.nodeId());
- ESLogger logger = Loggers.getLogger(Node.class, tmpSettings);
+ Logger logger = Loggers.getLogger(Node.class, tmpSettings);
if (hadPredefinedNodeName == false) {
logger.info("node name [{}] derived from node ID; set [{}] to override",
NODE_NAME_SETTING.get(tmpSettings), NODE_NAME_SETTING.getKey());
}
- final String displayVersion = Version.CURRENT + (Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "");
final JvmInfo jvmInfo = JvmInfo.jvmInfo();
logger.info(
"version[{}], pid[{}], build[{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]",
- displayVersion,
+ displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()),
jvmInfo.pid(),
Build.CURRENT.shortHash(),
Build.CURRENT.date(),
@@ -255,18 +269,12 @@ public class Node implements Closeable {
Constants.JVM_NAME,
Constants.JAVA_VERSION,
Constants.JVM_VERSION);
-
+ warnIfPreRelease(Version.CURRENT, Build.CURRENT.isSnapshot(), logger);
if (logger.isDebugEnabled()) {
logger.debug("using config [{}], data [{}], logs [{}], plugins [{}]",
environment.configFile(), Arrays.toString(environment.dataFiles()), environment.logsFile(), environment.pluginsFile());
}
- // TODO: Remove this in Elasticsearch 6.0.0
- if (JsonXContent.unquotedFieldNamesSet) {
- DeprecationLogger dLogger = new DeprecationLogger(logger);
- dLogger.deprecated("[{}] has been set, but will be removed in Elasticsearch 6.0.0",
- JsonXContent.JSON_ALLOW_UNQUOTED_FIELD_NAMES);
- }
this.pluginsService = new PluginsService(tmpSettings, environment.modulesFile(), environment.pluginsFile(), classpathPlugins);
this.settings = pluginsService.updatedSettings();
@@ -306,10 +314,10 @@ public class Node implements Closeable {
final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool);
clusterService.add(scriptModule.getScriptService());
resourcesToClose.add(clusterService);
- final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId());
+ final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(), classpathPlugins);
resourcesToClose.add(tribeService);
final IngestService ingestService = new IngestService(settings, threadPool, this.environment,
- scriptModule.getScriptService(), pluginsService.filterPlugins(IngestPlugin.class));
+ scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class));
ModulesBuilder modules = new ModulesBuilder();
// plugin modules must be added here, before others or we can get crazy injection errors...
@@ -318,18 +326,17 @@ public class Node implements Closeable {
}
final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool);
modules.add(new NodeModule(this, monitorService));
- NetworkModule networkModule = new NetworkModule(networkService, settings, false);
- modules.add(networkModule);
modules.add(new DiscoveryModule(this.settings));
- ClusterModule clusterModule = new ClusterModule(settings, clusterService);
+ ClusterModule clusterModule = new ClusterModule(settings, clusterService,
+ pluginsService.filterPlugins(ClusterPlugin.class));
modules.add(clusterModule);
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
modules.add(indicesModule);
SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));
- modules.add(searchModule);
- modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false, settings,
+ ActionModule actionModule = new ActionModule(DiscoveryNode.isIngestNode(settings), false, settings,
clusterModule.getIndexNameExpressionResolver(), settingsModule.getClusterSettings(),
- pluginsService.filterPlugins(ActionPlugin.class)));
+ pluginsService.filterPlugins(ActionPlugin.class));
+ modules.add(actionModule);
modules.add(new GatewayModule());
modules.add(new RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class)));
pluginsService.processModules(modules);
@@ -340,13 +347,19 @@ public class Node implements Closeable {
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
List<NamedWriteableRegistry.Entry> namedWriteables = Stream.of(
- networkModule.getNamedWriteables().stream(),
+ NetworkModule.getNamedWriteables().stream(),
indicesModule.getNamedWriteables().stream(),
searchModule.getNamedWriteables().stream(),
pluginsService.filterPlugins(Plugin.class).stream()
.flatMap(p -> p.getNamedWriteables().stream()))
.flatMap(Function.identity()).collect(Collectors.toList());
final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
+ final MetaStateService metaStateService = new MetaStateService(settings, nodeEnvironment);
+ final IndicesService indicesService = new IndicesService(settings, pluginsService, nodeEnvironment,
+ settingsModule.getClusterSettings(), analysisModule.getAnalysisRegistry(), searchModule.getQueryParserRegistry(),
+ clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry,
+ threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, metaStateService);
+
client = new NodeClient(settings, threadPool);
Collection<Object> pluginComponents = pluginsService.filterPlugins(Plugin.class).stream()
.flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService,
@@ -356,8 +369,30 @@ public class Node implements Closeable {
pluginsService.filterPlugins(Plugin.class).stream()
.map(Plugin::getCustomMetaDataUpgrader)
.collect(Collectors.toList());
+ final NetworkModule networkModule = new NetworkModule(settings, false, pluginsService.filterPlugins(NetworkPlugin.class), threadPool,
+ bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders);
+ final Transport transport = networkModule.getTransportSupplier().get();
+ final TransportService transportService = newTransportService(settings, transport, threadPool,
+ networkModule.getTransportInterceptor());
+ final Consumer<Binder> httpBind;
+ if (networkModule.isHttpEnabled()) {
+ HttpServerTransport httpServerTransport = networkModule.getHttpServerTransportSupplier().get();
+ HttpServer httpServer = new HttpServer(settings, httpServerTransport, actionModule.getRestController(), client,
+ circuitBreakerService);
+ httpBind = b -> {
+ b.bind(HttpServer.class).toInstance(httpServer);
+ b.bind(HttpServerTransport.class).toInstance(httpServerTransport);
+ };
+ } else {
+ httpBind = b -> {
+ b.bind(HttpServer.class).toProvider(Providers.of(null));
+ };
+ }
modules.add(b -> {
+ b.bind(IndicesQueriesRegistry.class).toInstance(searchModule.getQueryParserRegistry());
+ b.bind(SearchRequestParsers.class).toInstance(searchModule.getSearchRequestParsers());
+ b.bind(SearchExtRegistry.class).toInstance(searchModule.getSearchExtRegistry());
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(Client.class).toInstance(client);
b.bind(NodeClient.class).toInstance(client);
@@ -372,14 +407,17 @@ public class Node implements Closeable {
b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry());
b.bind(IngestService.class).toInstance(ingestService);
b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
- Class<? extends SearchService> searchServiceImpl = pickSearchServiceImplementation();
- if (searchServiceImpl == SearchService.class) {
- b.bind(SearchService.class).asEagerSingleton();
- } else {
- b.bind(SearchService.class).to(searchServiceImpl).asEagerSingleton();
- }
- pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p));
b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader);
+ b.bind(MetaStateService.class).toInstance(metaStateService);
+ b.bind(IndicesService.class).toInstance(indicesService);
+ b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService,
+ threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase()));
+ b.bind(Transport.class).toInstance(transport);
+ b.bind(TransportService.class).toInstance(transportService);
+ b.bind(NetworkService.class).toInstance(networkService);
+ b.bind(AllocationCommandRegistry.class).toInstance(NetworkModule.getAllocationCommandRegistry());
+ httpBind.accept(b);
+ pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p));
}
);
injector = modules.createInjector();
@@ -406,6 +444,24 @@ public class Node implements Closeable {
}
}
+ // visible for testing
+ static void warnIfPreRelease(final Version version, final boolean isSnapshot, final Logger logger) {
+ if (!version.isRelease() || isSnapshot) {
+ logger.warn(
+ "version [{}] is a pre-release version of Elasticsearch and is not suitable for production",
+ displayVersion(version, isSnapshot));
+ }
+ }
+
+ private static String displayVersion(final Version version, final boolean isSnapshot) {
+ return version + (isSnapshot ? "-SNAPSHOT" : "");
+ }
+
+ protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool,
+ TransportInterceptor interceptor) {
+ return new TransportService(settings, transport, threadPool, interceptor);
+ }
+
/**
* The settings that were used to create the node.
*/
@@ -438,12 +494,12 @@ public class Node implements Closeable {
/**
* Start the node. If the node is already started, this method is no-op.
*/
- public Node start() {
+ public Node start() throws NodeValidationException {
if (!lifecycle.moveToStarted()) {
return this;
}
- ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
+ Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
logger.info("starting ...");
// hack around dependency injection problem (for now...)
injector.getInstance(Discovery.class).setAllocationService(injector.getInstance(AllocationService.class));
@@ -558,7 +614,7 @@ public class Node implements Closeable {
if (!lifecycle.moveToStopped()) {
return this;
}
- ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
+ Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
logger.info("stopping ...");
injector.getInstance(TribeService.class).stop();
@@ -604,7 +660,7 @@ public class Node implements Closeable {
return;
}
- ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
+ Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
logger.info("closing ...");
List<Closeable> toClose = new ArrayList<>();
StopWatch stopWatch = new StopWatch("node_close");
@@ -680,6 +736,24 @@ public class Node implements Closeable {
}
IOUtils.close(toClose);
logger.info("closed");
+
+ final String log4jShutdownEnabled = System.getProperty("es.log4j.shutdownEnabled", "true");
+ final boolean shutdownEnabled;
+ switch (log4jShutdownEnabled) {
+ case "true":
+ shutdownEnabled = true;
+ break;
+ case "false":
+ shutdownEnabled = false;
+ break;
+ default:
+ throw new IllegalArgumentException(
+ "invalid value for [es.log4j.shutdownEnabled], was [" + log4jShutdownEnabled + "] but must be [true] or [false]");
+ }
+ if (shutdownEnabled) {
+ LoggerContext context = (LoggerContext) LogManager.getContext(false);
+ Configurator.shutdown(context);
+ }
}
@@ -705,7 +779,9 @@ public class Node implements Closeable {
* bound and publishing to
*/
@SuppressWarnings("unused")
- protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
+ protected void validateNodeBeforeAcceptingRequests(
+ final Settings settings,
+ final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
}
/** Writes a file to the logs dir containing the ports for the given transport type */
@@ -762,10 +838,12 @@ public class Node implements Closeable {
}
/**
- * Select the search service implementation. Overrided by tests.
+     * Creates a new SearchService. This method can be overridden by tests to inject mock implementations.
*/
- protected Class<? extends SearchService> pickSearchServiceImplementation() {
- return SearchService.class;
+ protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
+ ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays,
+ FetchPhase fetchPhase) {
+ return new SearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
}
/**
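
The removed pickSearchServiceImplementation returned a Class for Guice to bind; the new newSearchService and newTransportService methods are plain protected factories that tests override by subclassing Node, no injector involved. A small sketch of that seam, with hypothetical names:

class ServerSketch {
    interface SearchLike { String describe(); }

    // Production factory; tests override this instead of swapping bindings.
    protected SearchLike newSearchService() {
        return () -> "real search service";
    }

    final String start() {
        return newSearchService().describe();
    }
}

class TestServerSketch extends ServerSketch {
    @Override
    protected SearchLike newSearchService() {
        return () -> "mock search service";  // injected by the test subclass
    }

    public static void main(String[] args) {
        System.out.println(new ServerSketch().start());     // real search service
        System.out.println(new TestServerSketch().start()); // mock search service
    }
}
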
diff --git a/core/src/main/java/org/elasticsearch/node/NodeValidationException.java b/core/src/main/java/org/elasticsearch/node/NodeValidationException.java
new file mode 100644
index 0000000000..d4bedc49f0
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/node/NodeValidationException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+
+/**
+ * An exception thrown during node validation. Node validation runs immediately before a node
+ * begins accepting network requests in
+ * {@link Node#validateNodeBeforeAcceptingRequests(Settings, BoundTransportAddress)}. This
+ * exception is a checked exception that is declared as thrown from this method for the purpose
+ * of bubbling up to the user.
+ */
+public class NodeValidationException extends Exception {
+
+ /**
+ * Creates a node validation exception with the specified validation message to be displayed to
+ * the user.
+ *
+ * @param message the message to display to the user
+ */
+ public NodeValidationException(final String message) {
+ super(message);
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java b/core/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java
new file mode 100644
index 0000000000..7de805b704
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An extension point for {@link Plugin} implementations to customize the behavior of cluster management.
+ */
+public interface ClusterPlugin {
+
+ /**
+ * Return deciders used to customize where shards are allocated.
+ *
+ * @param settings Settings for the node
+ * @param clusterSettings Settings for the cluster
+ * @return Custom {@link AllocationDecider} instances
+ */
+ default Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
+ return Collections.emptyList();
+ }
+
+ /**
+ * Return {@link ShardsAllocator} implementations added by this plugin.
+ *
+ * The key of the returned {@link Map} is the name of the allocator, and the value
+ * is a function to construct the allocator.
+ *
+ * @param settings Settings for the node
+ * @param clusterSettings Settings for the cluster
+ * @return A map of allocator implementations
+ */
+ default Map<String, Supplier<ShardsAllocator>> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) {
+ return Collections.emptyMap();
+ }
+}
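
ClusterPlugin follows the same shape as the other 5.x plugin interfaces: every extension point is a default method returning an empty collection, so an implementation overrides only the hooks it actually provides. A self-contained sketch of the shape (the interface and plugin names are illustrative, not the real ES types):

import java.util.Collection;
import java.util.Collections;

interface ClusterHookSketch {
    default Collection<String> createDeciders() {
        return Collections.emptyList();
    }

    default Collection<String> getAllocators() {
        return Collections.emptyList();
    }
}

class MyPluginSketch implements ClusterHookSketch {
    @Override
    public Collection<String> createDeciders() {
        // The only hook this plugin implements; getAllocators keeps its default.
        return Collections.singletonList("my-custom-decider");
    }

    public static void main(String[] args) {
        ClusterHookSketch plugin = new MyPluginSketch();
        System.out.println(plugin.createDeciders()); // [my-custom-decider]
        System.out.println(plugin.getAllocators());  // []
    }
}
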
diff --git a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java
index 5dab19581a..90b1d32f4a 100644
--- a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java
+++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java
@@ -24,5 +24,6 @@ public class DummyPluginInfo extends PluginInfo {
super(name, description, version, classname);
}
- public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName");
+ public static final DummyPluginInfo INSTANCE = new DummyPluginInfo(
+ "dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName");
}
diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java
index 25dec47862..ac21256aca 100644
--- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java
+++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java
@@ -192,6 +192,9 @@ class InstallPluginCommand extends SettingCommand {
// pkg private for testing
void execute(Terminal terminal, String pluginId, boolean isBatch, Map<String, String> settings) throws Exception {
+ if (pluginId == null) {
+ throw new UserException(ExitCodes.USAGE, "plugin id is required");
+ }
final Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
// TODO: remove this leniency!! is it needed anymore?
if (Files.exists(env.pluginsFile()) == false) {
@@ -212,11 +215,11 @@ class InstallPluginCommand extends SettingCommand {
final String stagingHash = System.getProperty(PROPERTY_STAGING_ID);
if (stagingHash != null) {
url = String.format(Locale.ROOT,
- "https://staging.elastic.co/%1$s/download/elasticsearch-plugins/%2$s/%2$s-%3$s.zip",
+ "https://staging.elastic.co/%3$s-%1$s/downloads/elasticsearch-plugins/%2$s/%2$s-%3$s.zip",
stagingHash, pluginId, version);
} else {
url = String.format(Locale.ROOT,
- "https://artifacts.elastic.co/download/elasticsearch-plugins/%1$s/%1$s-%2$s.zip",
+ "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%1$s/%1$s-%2$s.zip",
pluginId, version);
}
terminal.println("-> Downloading " + pluginId + " from elastic");
diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java
index bd2f853bac..ee81261c08 100644
--- a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java
+++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java
@@ -60,8 +60,8 @@ class ListPluginsCommand extends SettingCommand {
}
Collections.sort(plugins);
for (final Path plugin : plugins) {
- terminal.println(plugin.getFileName().toString());
PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath()));
+ terminal.println(plugin.getFileName().toString() + "@" + info.getVersion());
terminal.println(Terminal.Verbosity.VERBOSE, info.toString());
}
}
diff --git a/core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java b/core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java
new file mode 100644
index 0000000000..ee7187c885
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugins;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportInterceptor;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+/**
+ * Plugin for extending network and transport-related classes.
+ */
+public interface NetworkPlugin {
+
+ /**
+ * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing
+ * transport (inter-node) requests. This must not return <code>null</code>
+ */
+ default List<TransportInterceptor> getTransportInterceptors() {
+ return Collections.emptyList();
+ }
+
+ /**
+ * Returns a map of {@link Transport} suppliers.
+ * See {@link org.elasticsearch.common.network.NetworkModule#TRANSPORT_TYPE_KEY} to configure a specific implementation.
+ */
+ default Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.emptyMap();
+ }
+ /**
+ * Returns a map of {@link HttpServerTransport} suppliers.
+ * See {@link org.elasticsearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation.
+ */
+ default Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.emptyMap();
+ }
+}
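
Both methods return a map of named suppliers rather than instances: Node collects the maps from all NetworkPlugins, then the configured type setting selects which single supplier actually gets invoked, so unused transports are never constructed. A sketch of that registry-and-select pattern in miniature (strings stand in for Transport instances, and the setting is a plain variable):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class TransportRegistrySketch {
    public static void main(String[] args) {
        // Plugins contribute named suppliers.
        Map<String, Supplier<String>> transports = new HashMap<>();
        transports.put("netty4", () -> "netty4 transport instance");
        transports.put("mock", () -> "mock transport instance");

        // Stand-in for the NetworkModule.TRANSPORT_TYPE_KEY setting.
        String configured = "netty4";
        Supplier<String> supplier = transports.get(configured);
        if (supplier == null) {
            throw new IllegalArgumentException("unknown transport type [" + configured + "]");
        }
        System.out.println(supplier.get()); // built lazily; only the chosen one
    }
}
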
diff --git a/core/src/main/java/org/elasticsearch/plugins/Plugin.java b/core/src/main/java/org/elasticsearch/plugins/Plugin.java
index 1c79986e18..1e39edc634 100644
--- a/core/src/main/java/org/elasticsearch/plugins/Plugin.java
+++ b/core/src/main/java/org/elasticsearch/plugins/Plugin.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
@@ -48,16 +49,23 @@ import java.util.Map;
import java.util.function.UnaryOperator;
/**
- * An extension point allowing to plug in custom functionality.
- * <p>
- * Implement any of these interfaces to extend Elasticsearch:
+ * An extension point for plugging in custom functionality. This class has a number of extension points that are available to all
+ * plugins; in addition, you can implement any of the following interfaces to further customize Elasticsearch:
* <ul>
* <li>{@link ActionPlugin}
* <li>{@link AnalysisPlugin}
+ * <li>{@link ClusterPlugin}
+ * <li>{@link DiscoveryPlugin}
+ * <li>{@link IngestPlugin}
* <li>{@link MapperPlugin}
+ * <li>{@link NetworkPlugin}
+ * <li>{@link RepositoryPlugin}
* <li>{@link ScriptPlugin}
* <li>{@link SearchPlugin}
* </ul>
+ * <p>In addition to extension points this class also declares some {@code @Deprecated} {@code public final void onModule} methods. These
+ * methods should cause any extensions of {@linkplain Plugin} that used the pre-5.x style extension syntax to fail to build and point the
+ * plugin author at the new extension syntax. We hope that these make the process of upgrading a plugin from 2.x to 5.x only mildly painful.
*/
public abstract class Plugin {
@@ -141,7 +149,19 @@ public abstract class Plugin {
}
/**
- * Old-style guice index level extension point.
+ * Provides the list of this plugin's custom thread pools, empty if
+ * none.
+ *
+ * @param settings the current settings
+ * @return executors builders for this plugin's custom thread pools
+ */
+ public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
+ return Collections.emptyList();
+ }
+
+ /**
+ * Old-style guice index level extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
+ * from 2.x.
*
* @deprecated use #onIndexModule instead
*/
@@ -150,7 +170,8 @@ public abstract class Plugin {
/**
- * Old-style guice settings extension point.
+ * Old-style guice settings extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
+ * from 2.x.
*
* @deprecated use #getSettings and #getSettingsFilter instead
*/
@@ -158,7 +179,8 @@ public abstract class Plugin {
public final void onModule(SettingsModule settingsModule) {}
/**
- * Old-style guice scripting extension point.
+ * Old-style guice scripting extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
+ * from 2.x.
*
* @deprecated implement {@link ScriptPlugin} instead
*/
@@ -166,7 +188,8 @@ public abstract class Plugin {
public final void onModule(ScriptModule module) {}
/**
- * Old-style analysis extension point.
+ * Old-style analysis extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
+ * from 2.x.
*
* @deprecated implement {@link AnalysisPlugin} instead
*/
@@ -174,7 +197,8 @@ public abstract class Plugin {
public final void onModule(AnalysisModule module) {}
/**
- * Old-style action extension point.
+ * Old-style action extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
+ * from 2.x.
*
* @deprecated implement {@link ActionPlugin} instead
*/
@@ -182,7 +206,8 @@ public abstract class Plugin {
public final void onModule(ActionModule module) {}
/**
- * Old-style action extension point.
+     * Old-style search extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
+ * from 2.x.
*
* @deprecated implement {@link SearchPlugin} instead
*/
@@ -190,13 +215,11 @@ public abstract class Plugin {
public final void onModule(SearchModule module) {}
/**
- * Provides the list of this plugin's custom thread pools, empty if
- * none.
+     * Old-style network extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
+ * from 2.x.
*
- * @param settings the current settings
- * @return executors builders for this plugin's custom thread pools
+ * @deprecated implement {@link NetworkPlugin} instead
*/
- public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
- return Collections.emptyList();
- }
+ @Deprecated
+ public final void onModule(NetworkModule module) {}
}
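
The signpost trick described in the class javadoc works because overriding a final method is a compile error: a 2.x plugin that still declares onModule(...) now fails to build, and the deprecation message points at the replacement interface. A minimal illustration (Object stands in for the concrete module types):

abstract class PluginSketch {
    // Final, deprecated no-op with the old 2.x signature.
    @Deprecated
    public final void onModule(Object module) {}
}

class LegacyPluginSketch extends PluginSketch {
    // Uncommenting this override is a compile error ("cannot override final
    // method"), which is exactly the point of the signpost:
    // public void onModule(Object module) { /* old-style registration */ }

    public static void main(String[] args) {
        System.out.println("compiles only while the override stays commented out");
    }
}
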
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java
index 3ce60882cc..022f107d1f 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java
+++ b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java
@@ -31,7 +31,7 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer;
*/
public class PluginCli extends MultiCommand {
- public PluginCli() {
+ private PluginCli() {
super("A tool for managing installed elasticsearch plugins");
subcommands.put("list", new ListPluginsCommand());
subcommands.put("install", new InstallPluginCommand());
@@ -39,23 +39,6 @@ public class PluginCli extends MultiCommand {
}
public static void main(String[] args) throws Exception {
- // initialize default for es.logger.level because we will not read the logging.yml
- String loggerLevel = System.getProperty("es.logger.level", "INFO");
- String pathHome = System.getProperty("es.path.home");
- // Set the appender for all potential log files to terminal so that other components that use the logger print out the
- // same terminal.
- // The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is
- // executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch
- // is run as service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs.
- // Therefore we print to Terminal.
- Environment loggingEnvironment = InternalSettingsPreparer.prepareEnvironment(Settings.builder()
- .put("path.home", pathHome)
- .put("appender.terminal.type", "terminal")
- .put("rootLogger", "${logger.level}, terminal")
- .put("logger.level", loggerLevel)
- .build(), Terminal.DEFAULT);
- LogConfigurator.configure(loggingEnvironment.settings(), false);
-
exit(new PluginCli().main(args, Terminal.DEFAULT));
}
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java
index 500861d899..3e241eadd3 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java
+++ b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java
@@ -22,7 +22,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -32,7 +32,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Properties;
-public class PluginInfo implements Streamable, ToXContent {
+public class PluginInfo implements Writeable, ToXContent {
public static final String ES_PLUGIN_PROPERTIES = "plugin-descriptor.properties";
public static final String ES_PLUGIN_POLICY = "plugin-security.policy";
@@ -45,13 +45,10 @@ public class PluginInfo implements Streamable, ToXContent {
static final String CLASSNAME = "classname";
}
- private String name;
- private String description;
- private String version;
- private String classname;
-
- public PluginInfo() {
- }
+ private final String name;
+ private final String description;
+ private final String version;
+ private final String classname;
/**
* Information about plugins
@@ -60,13 +57,28 @@ public class PluginInfo implements Streamable, ToXContent {
* @param description Its description
* @param version Version number
*/
- PluginInfo(String name, String description, String version, String classname) {
+ public PluginInfo(String name, String description, String version, String classname) {
this.name = name;
this.description = description;
this.version = version;
this.classname = classname;
}
+ public PluginInfo(StreamInput in) throws IOException {
+ this.name = in.readString();
+ this.description = in.readString();
+ this.version = in.readString();
+ this.classname = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(description);
+ out.writeString(version);
+ out.writeString(classname);
+ }
+
/** reads (and validates) plugin metadata descriptor file */
public static PluginInfo readFromProperties(Path dir) throws IOException {
Path descriptor = dir.resolve(ES_PLUGIN_PROPERTIES);
@@ -138,28 +150,6 @@ public class PluginInfo implements Streamable, ToXContent {
return version;
}
- public static PluginInfo readFromStream(StreamInput in) throws IOException {
- PluginInfo info = new PluginInfo();
- info.readFrom(in);
- return info;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- this.name = in.readString();
- this.description = in.readString();
- this.version = in.readString();
- this.classname = in.readString();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(name);
- out.writeString(description);
- out.writeString(version);
- out.writeString(classname);
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java
index ccbde1310d..03139e565e 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java
+++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java
@@ -19,26 +19,9 @@
package org.elasticsearch.plugins;
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
@@ -54,7 +37,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -62,6 +44,26 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.threadpool.ExecutorBuilder;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory;
/**
@@ -106,10 +108,9 @@ public class PluginsService extends AbstractComponent {
*/
public PluginsService(Settings settings, Path modulesDirectory, Path pluginsDirectory, Collection<Class<? extends Plugin>> classpathPlugins) {
super(settings);
- info = new PluginsAndModules();
List<Tuple<PluginInfo, Plugin>> pluginsLoaded = new ArrayList<>();
-
+ List<PluginInfo> pluginsList = new ArrayList<>();
// first we load plugins that are on the classpath. this is for tests and transport clients
for (Class<? extends Plugin> pluginClass : classpathPlugins) {
Plugin plugin = loadPlugin(pluginClass, settings);
@@ -118,9 +119,10 @@ public class PluginsService extends AbstractComponent {
logger.trace("plugin loaded from classpath [{}]", pluginInfo);
}
pluginsLoaded.add(new Tuple<>(pluginInfo, plugin));
- info.addPlugin(pluginInfo);
+ pluginsList.add(pluginInfo);
}
+ List<PluginInfo> modulesList = new ArrayList<>();
// load modules
if (modulesDirectory != null) {
try {
@@ -128,7 +130,7 @@ public class PluginsService extends AbstractComponent {
List<Tuple<PluginInfo, Plugin>> loaded = loadBundles(bundles);
pluginsLoaded.addAll(loaded);
for (Tuple<PluginInfo, Plugin> module : loaded) {
- info.addModule(module.v1());
+ modulesList.add(module.v1());
}
} catch (IOException ex) {
throw new IllegalStateException("Unable to initialize modules", ex);
@@ -142,18 +144,19 @@ public class PluginsService extends AbstractComponent {
List<Tuple<PluginInfo, Plugin>> loaded = loadBundles(bundles);
pluginsLoaded.addAll(loaded);
for (Tuple<PluginInfo, Plugin> plugin : loaded) {
- info.addPlugin(plugin.v1());
+ pluginsList.add(plugin.v1());
}
} catch (IOException ex) {
throw new IllegalStateException("Unable to initialize plugins", ex);
}
}
- plugins = Collections.unmodifiableList(pluginsLoaded);
+ this.info = new PluginsAndModules(pluginsList, modulesList);
+ this.plugins = Collections.unmodifiableList(pluginsLoaded);
// We need to build a List of plugins for checking mandatory plugins
Set<String> pluginsNames = new HashSet<>();
- for (Tuple<PluginInfo, Plugin> tuple : plugins) {
+ for (Tuple<PluginInfo, Plugin> tuple : this.plugins) {
pluginsNames.add(tuple.v1().getName());
}
@@ -177,7 +180,7 @@ public class PluginsService extends AbstractComponent {
logPluginInfo(info.getPluginInfos(), "plugin", logger);
Map<Plugin, List<OnModuleReference>> onModuleReferences = new HashMap<>();
- for (Tuple<PluginInfo, Plugin> pluginEntry : plugins) {
+ for (Tuple<PluginInfo, Plugin> pluginEntry : this.plugins) {
Plugin plugin = pluginEntry.v2();
List<OnModuleReference> list = new ArrayList<>();
for (Method method : plugin.getClass().getMethods()) {
@@ -211,7 +214,7 @@ public class PluginsService extends AbstractComponent {
this.onModuleReferences = Collections.unmodifiableMap(onModuleReferences);
}
- private static void logPluginInfo(final List<PluginInfo> pluginInfos, final String type, final ESLogger logger) {
+ private static void logPluginInfo(final List<PluginInfo> pluginInfos, final String type, final Logger logger) {
assert pluginInfos != null;
if (pluginInfos.isEmpty()) {
logger.info("no " + type + "s loaded");
@@ -242,10 +245,10 @@ public class PluginsService extends AbstractComponent {
try {
reference.onModuleMethod.invoke(plugin.v2(), module);
} catch (IllegalAccessException | InvocationTargetException e) {
- logger.warn("plugin {}, failed to invoke custom onModule method", e, plugin.v1().getName());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("plugin {}, failed to invoke custom onModule method", plugin.v1().getName()), e);
throw new ElasticsearchException("failed to invoke onModule", e);
} catch (Exception e) {
- logger.warn("plugin {}, failed to invoke custom onModule method", e, plugin.v1().getName());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("plugin {}, failed to invoke custom onModule method", plugin.v1().getName()), e);
throw e;
}
}
@@ -346,7 +349,7 @@ public class PluginsService extends AbstractComponent {
}
static List<Bundle> getPluginBundles(Path pluginsDirectory) throws IOException {
- ESLogger logger = Loggers.getLogger(PluginsService.class);
+ Logger logger = Loggers.getLogger(PluginsService.class);
// TODO: remove this leniency, but tests bogusly rely on it
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
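
The ESLogger-to-Log4j migration also changes the warn idiom seen above: the old API took the exception in the middle of the varargs, while Log4j 2 takes a lazily evaluated message Supplier plus a trailing Throwable, so the ParameterizedMessage is only built if WARN is enabled. A sketch of the new idiom, assuming log4j-api (and a log4j-core binding) on the classpath:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        String pluginName = "my-plugin";
        Exception e = new IllegalStateException("boom");
        // The cast picks the warn(Supplier<?>, Throwable) overload, as in the diff.
        logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage(
                        "plugin {}, failed to invoke custom onModule method", pluginName),
                e);
    }
}
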
diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java
index 0b4f8d281d..54cd34d674 100644
--- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java
+++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java
@@ -43,7 +43,7 @@ import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE;
/**
* A command for the plugin cli to remove a plugin from elasticsearch.
*/
-class RemovePluginCommand extends SettingCommand {
+final class RemovePluginCommand extends SettingCommand {
private final OptionSpec<String> arguments;
@@ -64,14 +64,16 @@ class RemovePluginCommand extends SettingCommand {
terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "...");
- Path pluginDir = env.pluginsFile().resolve(pluginName);
+ final Path pluginDir = env.pluginsFile().resolve(pluginName);
if (Files.exists(pluginDir) == false) {
- throw new UserException(ExitCodes.USAGE, "plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins");
+ throw new UserException(
+ ExitCodes.USAGE,
+ "plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins");
}
- List<Path> pluginPaths = new ArrayList<>();
+ final List<Path> pluginPaths = new ArrayList<>();
- Path pluginBinDir = env.binFile().resolve(pluginName);
+ final Path pluginBinDir = env.binFile().resolve(pluginName);
if (Files.exists(pluginBinDir)) {
if (Files.isDirectory(pluginBinDir) == false) {
throw new UserException(ExitCodes.IO_ERROR, "Bin dir for " + pluginName + " is not a directory");
@@ -81,10 +83,19 @@ class RemovePluginCommand extends SettingCommand {
}
terminal.println(VERBOSE, "Removing: " + pluginDir);
- Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
+ final Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
pluginPaths.add(tmpPluginDir);
IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()]));
+
+ // we preserve the config files in case the user is upgrading the plugin, but we print
+        // a message so the user knows in case they want to remove them manually
+ final Path pluginConfigDir = env.configFile().resolve(pluginName);
+ if (Files.exists(pluginConfigDir)) {
+ terminal.println(
+ "-> Preserving plugin config files [" + pluginConfigDir + "] in case of upgrade, delete manually if not needed");
+ }
}
+
}
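
RemovePluginCommand renames the plugin directory to a ".removing-" name with an atomic move before deleting, so an interrupted removal leaves an obviously stale directory rather than a half-removed plugin. A self-contained sketch of that sequence on a throwaway temp directory (paths and the recursive delete are stand-ins for the real code's environment and IOUtils.rm):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.stream.Stream;

public class AtomicRemoveSketch {
    public static void main(String[] args) throws IOException {
        Path pluginsDir = Files.createTempDirectory("plugins");
        Path pluginDir = Files.createDirectory(pluginsDir.resolve("analysis-icu"));
        Files.createFile(pluginDir.resolve("plugin-descriptor.properties"));

        // Atomic rename marks the directory as doomed before any deletes happen.
        Path tmpPluginDir = pluginsDir.resolve(".removing-analysis-icu");
        Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);

        // Recursive delete of the renamed directory, children before parents.
        try (Stream<Path> paths = Files.walk(tmpPluginDir)) {
            paths.sorted(Comparator.reverseOrder())
                 .forEach(p -> p.toFile().delete());
        }
        System.out.println("removed: " + Files.notExists(tmpPluginDir));
    }
}
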
diff --git a/core/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/core/src/main/java/org/elasticsearch/plugins/SearchPlugin.java
index 97ee715ef6..364454de0c 100644
--- a/core/src/main/java/org/elasticsearch/plugins/SearchPlugin.java
+++ b/core/src/main/java/org/elasticsearch/plugins/SearchPlugin.java
@@ -30,6 +30,8 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParser;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionParser;
+import org.elasticsearch.search.SearchExtBuilder;
+import org.elasticsearch.search.SearchExtParser;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.Aggregator;
@@ -83,6 +85,12 @@ public interface SearchPlugin {
return emptyList();
}
/**
+ * The new {@link SearchExtParser}s defined by this plugin.
+ */
+ default List<SearchExtSpec<?>> getSearchExts() {
+ return emptyList();
+ }
+ /**
* Get the {@link Highlighter}s defined by this plugin.
*/
default Map<String, Highlighter> getHighlighters() {
@@ -160,7 +168,7 @@ public interface SearchPlugin {
/**
* Specification for an {@link Aggregation}.
*/
- public static class AggregationSpec extends SearchExtensionSpec<AggregationBuilder, Aggregator.Parser> {
+ class AggregationSpec extends SearchExtensionSpec<AggregationBuilder, Aggregator.Parser> {
private final Map<String, Writeable.Reader<? extends InternalAggregation>> resultReaders = new TreeMap<>();
/**
@@ -217,7 +225,7 @@ public interface SearchPlugin {
/**
* Specification for a {@link PipelineAggregator}.
*/
- public static class PipelineAggregationSpec extends SearchExtensionSpec<PipelineAggregationBuilder, PipelineAggregator.Parser> {
+ class PipelineAggregationSpec extends SearchExtensionSpec<PipelineAggregationBuilder, PipelineAggregator.Parser> {
private final Map<String, Writeable.Reader<? extends InternalAggregation>> resultReaders = new TreeMap<>();
private final Writeable.Reader<? extends PipelineAggregator> aggregatorReader;
@@ -290,6 +298,19 @@ public interface SearchPlugin {
}
}
+ /**
+ * Specification for a {@link SearchExtBuilder} which represents an additional section that can be
+ * parsed in a search request (within the ext element).
+ */
+ class SearchExtSpec<T extends SearchExtBuilder> extends SearchExtensionSpec<T, SearchExtParser<T>> {
+ public SearchExtSpec(ParseField name, Writeable.Reader<? extends T> reader, SearchExtParser<T> parser) {
+ super(name, reader, parser);
+ }
+
+ public SearchExtSpec(String name, Writeable.Reader<? extends T> reader, SearchExtParser<T> parser) {
+ super(name, reader, parser);
+ }
+ }
/**
* Specification of search time behavior extension like a custom {@link MovAvgModel} or {@link ScoreFunction}.
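For plugin authors, the new hook is exercised by returning one or more SearchExtSpecs from getSearchExts(). A hedged sketch, assuming the Elasticsearch classes above are on the classpath; MyExtBuilder (a SearchExtBuilder with a StreamInput constructor) and MyExtParser (a SearchExtParser<MyExtBuilder>) are hypothetical types the plugin would define elsewhere:

import java.util.Collections;
import java.util.List;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;

// Hypothetical plugin wiring for getSearchExts(): the spec ties a name under
// the request's "ext" element to a transport-layer reader and a REST-layer parser.
public class MyPlugin extends Plugin implements SearchPlugin {
    @Override
    public List<SearchExtSpec<?>> getSearchExts() {
        return Collections.singletonList(
                new SearchExtSpec<>("my_ext", MyExtBuilder::new, new MyExtParser()));
    }
}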
diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
index 076853fd75..e5951d48a0 100644
--- a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
+++ b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
@@ -19,6 +19,8 @@
package org.elasticsearch.repositories;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -140,7 +142,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
@Override
public void onFailure(String source, Exception e) {
- logger.warn("failed to create repository [{}]", e, request.name);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e);
super.onFailure(source, e);
}
@@ -214,7 +216,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
try {
repository.endVerification(verificationToken);
} catch (Exception e) {
- logger.warn("[{}] failed to finish repository verification", e, repositoryName);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e);
listener.onFailure(e);
return;
}
@@ -231,7 +233,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
repository.endVerification(verificationToken);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("[{}] failed to finish repository verification", inner, repositoryName);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner);
}
listener.onFailure(e);
}
@@ -293,14 +295,14 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
} catch (RepositoryException ex) {
// TODO: this catch is bogus, it means the old repo is already closed,
// but we have nothing to replace it
- logger.warn("failed to change repository [{}]", ex, repositoryMetaData.name());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex);
}
}
} else {
try {
repository = createRepository(repositoryMetaData);
} catch (RepositoryException ex) {
- logger.warn("failed to create repository [{}]", ex, repositoryMetaData.name());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
}
}
if (repository != null) {
@@ -382,7 +384,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
repository.start();
return repository;
} catch (Exception e) {
- logger.warn("failed to create repository [{}][{}]", e, repositoryMetaData.type(), repositoryMetaData.name());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
}
}
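Every logging change in this file follows the same log4j2 migration idiom: the old ESLogger overloads took the exception before the format arguments, while the log4j2 Logger takes a message Supplier plus the Throwable, deferring message construction until the level is actually enabled. A self-contained sketch of the pattern (repository name and exception are placeholder values):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyWarnSketch {
    private static final Logger logger = LogManager.getLogger(LazyWarnSketch.class);

    public static void main(String[] args) {
        String repositoryName = "my_backup";                      // placeholder
        Exception e = new RuntimeException("simulated failure");  // placeholder
        // the Supplier cast selects the warn(Supplier<?>, Throwable) overload;
        // the ParameterizedMessage is only built if WARN is enabled
        logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                "failed to create repository [{}]", repositoryName), e);
    }
}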
diff --git a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java
index 65544421c8..cc1170a484 100644
--- a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java
+++ b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java
@@ -19,14 +19,10 @@
package org.elasticsearch.repositories;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.atomic.AtomicInteger;
-
import com.carrotsearch.hppc.ObjectContainer;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -45,6 +41,12 @@ import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicInteger;
+
public class VerifyNodeRepositoryAction extends AbstractComponent {
public static final String ACTION_NAME = "internal:admin/repository/verify";
@@ -62,10 +64,6 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SAME, new VerifyNodeRepositoryRequestHandler());
}
- public void close() {
- transportService.removeHandler(ACTION_NAME);
- }
-
public void verify(String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
final DiscoveryNodes discoNodes = clusterService.state().nodes();
final DiscoveryNode localNode = discoNodes.getLocalNode();
@@ -83,7 +81,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
try {
doVerify(repository, verificationToken, localNode);
} catch (Exception e) {
- logger.warn("[{}] failed to verify repository", e, repository);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e);
errors.add(new VerificationFailure(node.getId(), e));
}
if (counter.decrementAndGet() == 0) {
@@ -154,7 +152,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
try {
doVerify(request.repository, request.verificationToken, localNode);
} catch (Exception ex) {
- logger.warn("[{}] failed to verify repository", ex, request.repository);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex);
throw ex;
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 856b3ff426..a1bad51626 100644
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -19,6 +19,8 @@
package org.elasticsearch.repositories.blobstore;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFormatTooNewException;
@@ -33,6 +35,7 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
@@ -352,10 +355,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
try {
snapshotInfo = getSnapshotInfo(snapshotId);
} catch (SnapshotException e) {
- logger.warn("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " +
- "the corresponding snap-{}.dat file cannot be read. The snapshot will no longer be included in " +
- "the repository but its data directories will remain.", e, getMetadata().name(),
- snapshotId, snapshotId.getUUID());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " +
+ "the corresponding snap-{}.dat file cannot be read. The snapshot will no longer be included in " +
+ "the repository but its data directories will remain.", getMetadata().name(), snapshotId, snapshotId.getUUID()), e);
continue;
}
for (final String indexName : snapshotInfo.indices()) {
@@ -393,7 +395,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
} catch (SnapshotMissingException ex) {
throw ex;
} catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) {
- logger.warn("cannot read snapshot file [{}]", ex, snapshotId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
}
MetaData metaData = null;
try {
@@ -403,7 +405,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true);
}
} catch (IOException | SnapshotException ex) {
- logger.warn("cannot read metadata for snapshot [{}]", ex, snapshotId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex);
}
try {
// Delete snapshot from the index file, since it is the maintainer of truth of active snapshots
@@ -423,7 +425,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
try {
indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, snapshotId.getUUID());
} catch (IOException ex) {
- logger.warn("[{}] failed to delete metadata for index [{}]", ex, snapshotId, index);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
}
if (metaData != null) {
IndexMetaData indexMetaData = metaData.index(index);
@@ -432,7 +434,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
try {
delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId));
} catch (SnapshotException ex) {
- logger.warn("[{}] failed to delete shard data for shard [{}][{}]", ex, snapshotId, index, shardId);
+ final int finalShardId = shardId;
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
}
}
}
@@ -451,12 +454,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// we'll ignore that and accept that cleanup didn't fully succeed.
// since we are using UUIDs for path names, this won't be an issue for
// snapshotting indices of the same name
- logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
- "its index folder due to the directory not being empty.", dnee, metadata.name(), indexId);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
+ "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
} catch (IOException ioe) {
// a different IOException occurred while trying to delete - will just log the issue for now
- logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
- "its index folder.", ioe, metadata.name(), indexId);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
+ "its index folder.", metadata.name(), indexId), ioe);
}
}
} catch (IOException ex) {
@@ -470,7 +473,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
try {
snapshotFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId);
} catch (IOException e) {
- logger.warn("[{}] Unable to delete snapshot file [{}]", e, snapshotInfo.snapshotId(), blobId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e);
}
} else {
// we don't know the version, first try the current format, then the legacy format
@@ -482,7 +485,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
snapshotLegacyFormat.delete(snapshotsBlobContainer, blobId);
} catch (IOException e2) {
// neither snapshot file could be deleted, log the error
- logger.warn("Unable to delete snapshot file [{}]", e, blobId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e);
}
}
}
@@ -494,7 +497,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
try {
globalMetaDataFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId);
} catch (IOException e) {
- logger.warn("[{}] Unable to delete global metadata file [{}]", e, snapshotInfo.snapshotId(), blobId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e);
}
} else {
// we don't know the version, first try the current format, then the legacy format
@@ -506,7 +509,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
globalMetaDataLegacyFormat.delete(snapshotsBlobContainer, blobId);
} catch (IOException e2) {
// neither global metadata file could be deleted, log the error
- logger.warn("Unable to delete global metadata file [{}]", e, blobId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e);
}
}
}
@@ -598,7 +601,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
metaDataBuilder.put(indexMetaDataFormat(snapshotVersion).read(indexMetaDataBlobContainer, snapshotId.getUUID()), false);
} catch (ElasticsearchParseException | IOException ex) {
if (ignoreIndexErrors) {
- logger.warn("[{}] [{}] failed to read metadata for index", ex, snapshotId, index.getName());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex);
} else {
throw ex;
}
@@ -618,10 +621,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private RateLimiter getRateLimiter(Settings repositorySettings, String setting, ByteSizeValue defaultRate) {
ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.getAsBytesSize(setting,
settings.getAsBytesSize(setting, defaultRate));
- if (maxSnapshotBytesPerSec.bytes() <= 0) {
+ if (maxSnapshotBytesPerSec.getBytes() <= 0) {
return null;
} else {
- return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.mbFrac());
+ return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.getMbFrac());
}
}
@@ -1073,7 +1076,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
blobContainer.deleteBlob(blobName);
} catch (IOException e) {
// TODO: don't catch and let the user handle it?
- logger.debug("[{}] [{}] error deleting blob [{}] during cleanup", e, snapshotId, shardId, blobName);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e);
}
}
}
@@ -1150,7 +1153,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest));
return new Tuple<>(shardSnapshots, latest);
} catch (IOException e) {
- logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest);
+ final String file = SNAPSHOT_INDEX_PREFIX + latest;
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to read index file [{}]", file), e);
}
}
@@ -1168,7 +1172,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
}
} catch (IOException e) {
- logger.warn("failed to read commit point [{}]", e, name);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to read commit point [{}]", name), e);
}
}
return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1);
@@ -1251,7 +1255,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// in a bwc compatible way.
maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
} catch (Exception e) {
- logger.warn("{} Can't calculate hash from blob for file [{}] [{}]", e, shardId, fileInfo.physicalName(), fileInfo.metadata());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
}
if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
// a commit point file with the same name, size and checksum was already copied to repository
@@ -1524,7 +1528,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId);
recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
} catch (IOException e) {
- logger.warn("{} Can't read metadata from store, will not reuse any local file while restoring", e, shardId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e);
recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
}
@@ -1540,7 +1544,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata);
} catch (Exception e) {
// if the index is broken we might not be able to read it
- logger.warn("{} Can't calculate hash from blog for file [{}] [{}]", e, shardId, fileInfo.physicalName(), fileInfo.metadata());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
}
snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
fileInfos.put(fileInfo.metadata().name(), fileInfo);
@@ -1577,6 +1581,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
index.totalRecoverFiles(), new ByteSizeValue(index.totalRecoverBytes()), index.reusedFileCount(), new ByteSizeValue(index.reusedBytes()));
}
try {
+ // first, delete pre-existing files in the store that have the same name but are
+ // different (i.e. different length/checksum) from those being restored in the snapshot
+ for (final StoreFileMetaData storeFileMetaData : diff.different) {
+ IOUtils.deleteFiles(store.directory(), storeFileMetaData.name());
+ }
+ // restore the files from the snapshot to the Lucene store
for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) {
logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
restoreFile(fileToRecover, store);
@@ -1636,6 +1646,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
} else {
stream = new RateLimitingInputStream(partSliceStream, restoreRateLimiter, restoreRateLimitingTimeInNanos::inc);
}
+
try (final IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) {
final byte[] buffer = new byte[BUFFER_SIZE];
int length;
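Two of the hunks above (finalShardId, file) exist only because the logging rewrite introduces lambdas: a lambda may capture only (effectively) final locals, so a variable that is reassigned in a loop or computed inline must first be copied into a fresh final local. A minimal illustration:

public class LambdaCaptureSketch {
    public static void main(String[] args) {
        for (int shardId = 0; shardId < 3; shardId++) {
            // shardId itself is reassigned by the loop, so it cannot be
            // captured; a fresh effectively-final copy can be
            final int finalShardId = shardId;
            Runnable r = () -> System.out.println("shard " + finalShardId);
            r.run();
        }
    }
}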
diff --git a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
index 5888bd07c0..c028913d34 100644
--- a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
@@ -54,9 +54,9 @@ public class FsRepository extends BlobStoreRepository {
public static final Setting<String> REPOSITORIES_LOCATION_SETTING =
new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), Property.NodeScope);
public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
- Setting.byteSizeSetting("chunk_size", "-1", Property.NodeScope);
+ Setting.byteSizeSetting("chunk_size", new ByteSizeValue(-1), Property.NodeScope);
public static final Setting<ByteSizeValue> REPOSITORIES_CHUNK_SIZE_SETTING =
- Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", Property.NodeScope);
+ Setting.byteSizeSetting("repositories.fs.chunk_size", new ByteSizeValue(-1), Property.NodeScope);
public static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope);
public static final Setting<Boolean> REPOSITORIES_COMPRESS_SETTING =
Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope);
diff --git a/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java
index f5d4f4eb69..f146267c9b 100644
--- a/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java
+++ b/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java
@@ -19,6 +19,7 @@
package org.elasticsearch.rest;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -26,9 +27,17 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.Predicate;
+
+import static java.util.stream.Collectors.toSet;
public abstract class AbstractRestChannel implements RestChannel {
+ private static final Predicate<String> INCLUDE_FILTER = f -> f.charAt(0) != '-';
+ private static final Predicate<String> EXCLUDE_FILTER = INCLUDE_FILTER.negate();
+
protected final RestRequest request;
protected final boolean detailedErrorsEnabled;
@@ -41,7 +50,7 @@ public abstract class AbstractRestChannel implements RestChannel {
@Override
public XContentBuilder newBuilder() throws IOException {
- return newBuilder(request.hasContent() ? request.content() : null, request.hasParam("filter_path"));
+ return newBuilder(request.hasContent() ? request.content() : null, true);
}
@Override
@@ -64,8 +73,15 @@ public abstract class AbstractRestChannel implements RestChannel {
contentType = XContentType.JSON;
}
- String[] filters = useFiltering ? request.paramAsStringArrayOrEmptyIfAll("filter_path") : null;
- XContentBuilder builder = new XContentBuilder(XContentFactory.xContent(contentType), bytesOutput(), filters);
+ Set<String> includes = Collections.emptySet();
+ Set<String> excludes = Collections.emptySet();
+ if (useFiltering) {
+ Set<String> filters = Strings.splitStringByCommaToSet(request.param("filter_path", null));
+ includes = filters.stream().filter(INCLUDE_FILTER).collect(toSet());
+ excludes = filters.stream().filter(EXCLUDE_FILTER).map(f -> f.substring(1)).collect(toSet());
+ }
+
+ XContentBuilder builder = new XContentBuilder(XContentFactory.xContent(contentType), bytesOutput(), includes, excludes);
if (request.paramAsBoolean("pretty", false)) {
builder.prettyPrint().lfAtEnd();
}
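The rewritten filter_path handling splits one comma-separated parameter into include and exclude sets, treating a leading '-' as an exclusion marker and stripping it before use. The same logic, reduced to a runnable sketch with plain JDK splitting standing in for Strings.splitStringByCommaToSet:

import java.util.Arrays;
import java.util.Set;
import java.util.function.Predicate;
import static java.util.stream.Collectors.toSet;

public class FilterPathSketch {
    private static final Predicate<String> INCLUDE_FILTER = f -> f.charAt(0) != '-';
    private static final Predicate<String> EXCLUDE_FILTER = INCLUDE_FILTER.negate();

    public static void main(String[] args) {
        // e.g. ?filter_path=took,-hits.hits._source
        Set<String> filters = Arrays.stream("took,-hits.hits._source".split(","))
                .collect(toSet());
        Set<String> includes = filters.stream().filter(INCLUDE_FILTER).collect(toSet());
        Set<String> excludes = filters.stream().filter(EXCLUDE_FILTER)
                .map(f -> f.substring(1)).collect(toSet());
        System.out.println("includes=" + includes + ", excludes=" + excludes);
    }
}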
diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
index fcf79962b6..7af8249bf2 100644
--- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
+++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
@@ -19,11 +19,13 @@
package org.elasticsearch.rest;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -111,7 +113,7 @@ public class BytesRestResponse extends RestResponse {
return this.status;
}
- private static final ESLogger SUPPRESSED_ERROR_LOGGER = ESLoggerFactory.getLogger("rest.suppressed");
+ private static final Logger SUPPRESSED_ERROR_LOGGER = ESLoggerFactory.getLogger("rest.suppressed");
private static XContentBuilder convert(RestChannel channel, RestStatus status, Exception e) throws IOException {
XContentBuilder builder = channel.newErrorBuilder().startObject();
@@ -123,9 +125,9 @@ public class BytesRestResponse extends RestResponse {
params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request());
} else {
if (status.getStatus() < 500) {
- SUPPRESSED_ERROR_LOGGER.debug("path: {}, params: {}", e, channel.request().rawPath(), channel.request().params());
+ SUPPRESSED_ERROR_LOGGER.debug((Supplier<?>) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e);
} else {
- SUPPRESSED_ERROR_LOGGER.warn("path: {}, params: {}", e, channel.request().rawPath(), channel.request().params());
+ SUPPRESSED_ERROR_LOGGER.warn((Supplier<?>) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e);
}
params = channel.request();
}
diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java
index d5ba350ff4..e63f35884e 100644
--- a/core/src/main/java/org/elasticsearch/rest/RestController.java
+++ b/core/src/main/java/org/elasticsearch/rest/RestController.java
@@ -19,6 +19,8 @@
package org.elasticsearch.rest;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
@@ -28,17 +30,12 @@ import org.elasticsearch.common.path.PathTrie;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.plugins.ActionPlugin;
import java.io.IOException;
import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
-import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
import static org.elasticsearch.rest.RestStatus.OK;
@@ -213,7 +210,7 @@ public class RestController extends AbstractLifecycleComponent {
channel.sendResponse(new BytesRestResponse(channel, e));
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.error("failed to send failure response for uri [{}]", inner, request.uri());
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner);
}
}
@@ -315,7 +312,7 @@ public class RestController extends AbstractLifecycleComponent {
try {
channel.sendResponse(new BytesRestResponse(channel, e));
} catch (IOException e1) {
- logger.error("Failed to send failure response for uri [{}]", e1, request.uri());
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("Failed to send failure response for uri [{}]", request.uri()), e1);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java b/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java
index 5074a12079..572da497c1 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/RestActionListener.java
@@ -19,8 +19,8 @@
package org.elasticsearch.rest.action;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
@@ -33,7 +33,7 @@ public abstract class RestActionListener<Response> implements ActionListener<Res
// we use static here so we won't have to pass the actual logger each time for a very rare case of logging
// where the settings don't matter that much
- private static ESLogger logger = Loggers.getLogger(RestResponseListener.class);
+ private static Logger logger = Loggers.getLogger(RestResponseListener.class);
protected final RestChannel channel;
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java
index 0c23b5346d..5f64bcf8aa 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java
@@ -21,6 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Priority;
@@ -57,9 +58,17 @@ public class RestClusterHealthAction extends BaseRestHandler {
if (waitForStatus != null) {
clusterHealthRequest.waitForStatus(ClusterHealthStatus.valueOf(waitForStatus.toUpperCase(Locale.ROOT)));
}
- clusterHealthRequest.waitForRelocatingShards(
- request.paramAsInt("wait_for_relocating_shards", clusterHealthRequest.waitForRelocatingShards()));
- clusterHealthRequest.waitForActiveShards(request.paramAsInt("wait_for_active_shards", clusterHealthRequest.waitForActiveShards()));
+ clusterHealthRequest.waitForNoRelocatingShards(
+ request.paramAsBoolean("wait_for_no_relocating_shards", clusterHealthRequest.waitForNoRelocatingShards()));
+ if (request.hasParam("wait_for_relocating_shards")) {
+ // wait_for_relocating_shards has been removed in favor of wait_for_no_relocating_shards
+ throw new IllegalArgumentException("wait_for_relocating_shards has been removed, " +
+ "use wait_for_no_relocating_shards [true/false] instead");
+ }
+ String waitForActiveShards = request.param("wait_for_active_shards");
+ if (waitForActiveShards != null) {
+ clusterHealthRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
+ }
clusterHealthRequest.waitForNodes(request.param("wait_for_nodes", clusterHealthRequest.waitForNodes()));
if (request.param("wait_for_events") != null) {
clusterHealthRequest.waitForEvents(Priority.valueOf(request.param("wait_for_events").toUpperCase(Locale.ROOT)));
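Note the hard failure for the old parameter name rather than a silent ignore, so clients still sending it learn about the replacement immediately. The guard pattern in isolation (a sketch with a plain map standing in for RestRequest):

import java.util.Collections;
import java.util.Map;

public class RemovedParamSketch {
    static void checkParams(Map<String, String> params) {
        // fail fast on the removed name, pointing callers at the replacement
        if (params.containsKey("wait_for_relocating_shards")) {
            throw new IllegalArgumentException("wait_for_relocating_shards has been removed, " +
                    "use wait_for_no_relocating_shards [true/false] instead");
        }
    }

    public static void main(String[] args) {
        checkParams(Collections.singletonMap("wait_for_no_relocating_shards", "true")); // ok
        checkParams(Collections.singletonMap("wait_for_relocating_shards", "0"));       // throws
    }
}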
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java
index bd2fd54420..4333dfc027 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java
@@ -20,7 +20,6 @@
package org.elasticsearch.rest.action.admin.cluster;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
-import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
@@ -54,9 +53,10 @@ public class RestSnapshotsStatusAction extends BaseRestHandler {
if (snapshots.length == 1 && "_all".equalsIgnoreCase(snapshots[0])) {
snapshots = Strings.EMPTY_ARRAY;
}
- SnapshotsStatusRequest snapshotsStatusResponse = snapshotsStatusRequest(repository).snapshots(snapshots);
+ SnapshotsStatusRequest snapshotsStatusRequest = snapshotsStatusRequest(repository).snapshots(snapshots);
+ snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable()));
- snapshotsStatusResponse.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusResponse.masterNodeTimeout()));
- client.admin().cluster().snapshotsStatus(snapshotsStatusResponse, new RestToXContentListener<SnapshotsStatusResponse>(channel));
+ snapshotsStatusRequest.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusRequest.masterNodeTimeout()));
+ client.admin().cluster().snapshotsStatus(snapshotsStatusRequest, new RestToXContentListener<>(channel));
}
}
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java
index 7801aad8e0..04d0bf5761 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java
@@ -50,7 +50,7 @@ public class RestAnalyzeAction extends BaseRestHandler {
public static final ParseField TEXT = new ParseField("text");
public static final ParseField FIELD = new ParseField("field");
public static final ParseField TOKENIZER = new ParseField("tokenizer");
- public static final ParseField TOKEN_FILTERS = new ParseField("filter", "token_filter");
+ public static final ParseField TOKEN_FILTERS = new ParseField("filter");
public static final ParseField CHAR_FILTERS = new ParseField("char_filter");
public static final ParseField EXPLAIN = new ParseField("explain");
public static final ParseField ATTRIBUTES = new ParseField("attributes");
@@ -77,7 +77,7 @@ public class RestAnalyzeAction extends BaseRestHandler {
if (request.hasParam("tokenizer")) {
analyzeRequest.tokenizer(request.param("tokenizer"));
}
- for (String filter : request.paramAsStringArray("filter", request.paramAsStringArray("token_filter", Strings.EMPTY_ARRAY))) {
+ for (String filter : request.paramAsStringArray("filter", Strings.EMPTY_ARRAY)) {
analyzeRequest.addTokenFilter(filter);
}
for (String charFilter : request.paramAsStringArray("char_filter", Strings.EMPTY_ARRAY)) {
@@ -144,7 +144,7 @@ public class RestAnalyzeAction extends BaseRestHandler {
analyzeRequest.addTokenFilter(parser.map());
} else {
throw new IllegalArgumentException(currentFieldName
- + " array element should contain token_filter's name or setting");
+ + " array element should contain filter's name or setting");
}
}
} else if (parseFieldMatcher.match(currentFieldName, Fields.CHAR_FILTERS)
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java
index 87d9eb671b..b027aeb8d6 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java
@@ -20,6 +20,7 @@ package org.elasticsearch.rest.action.admin.indices;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
@@ -49,7 +50,7 @@ public class RestIndexDeleteAliasesAction extends BaseRestHandler {
final String[] aliases = Strings.splitStringByCommaToArray(request.param("name"));
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
- indicesAliasesRequest.removeAlias(indices, aliases);
+ indicesAliasesRequest.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases));
indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<IndicesAliasesResponse>(channel));
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java
index 62661ed519..f7546bd57d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java
@@ -20,9 +20,7 @@ package org.elasticsearch.rest.action.admin.indices;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@@ -103,12 +101,9 @@ public class RestIndexPutAliasAction extends BaseRestHandler {
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
- String[] aliases = new String[]{alias};
- IndicesAliasesRequest.AliasActions aliasAction = new AliasActions(AliasAction.Type.ADD, indices, aliases);
- indicesAliasesRequest.addAliasAction(aliasAction);
indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
-
+ IndicesAliasesRequest.AliasActions aliasAction = AliasActions.add().indices(indices).alias(alias);
if (routing != null) {
aliasAction.routing(routing);
}
@@ -121,6 +116,7 @@ public class RestIndexPutAliasAction extends BaseRestHandler {
if (filter != null) {
aliasAction.filter(filter);
}
- client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<IndicesAliasesResponse>(channel));
+ indicesAliasesRequest.addAliasAction(aliasAction);
+ client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<>(channel));
}
}
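Both alias handlers now construct their actions through the AliasActions factory methods rather than the removed AliasAction.Type constructor-plus-arrays style. The resulting call shape, as a short sketch (index and alias names are placeholders; the Elasticsearch request classes are assumed on the classpath):

import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;

public class AliasActionsSketch {
    public static IndicesAliasesRequest build() {
        IndicesAliasesRequest request = new IndicesAliasesRequest();
        // one builder per action, added to the request once fully configured
        AliasActions addAction = AliasActions.add().indices("logs-2016-10").alias("logs");
        addAction.routing("1"); // optional extras are set the same way as in the handler above
        request.addAliasAction(addAction);
        request.addAliasAction(AliasActions.remove().indices("logs-2015").aliases("old-logs"));
        return request;
    }
}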
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java
index b404d61d23..fe8a6a1662 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java
@@ -23,9 +23,12 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.BaseRestHandler;
@@ -34,13 +37,17 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.AcknowledgedRestListener;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
import static org.elasticsearch.rest.RestRequest.Method.POST;
public class RestIndicesAliasesAction extends BaseRestHandler {
+ static final ObjectParser<IndicesAliasesRequest, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>("aliases");
+ static {
+ PARSER.declareObjectArray((request, actions) -> {
+ for (AliasActions action: actions) {
+ request.addAliasAction(action);
+ }
+ }, AliasActions.PARSER, new ParseField("actions"));
+ }
@Inject
public RestIndicesAliasesAction(Settings settings, RestController controller) {
@@ -52,104 +59,12 @@ public class RestIndicesAliasesAction extends BaseRestHandler {
public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
+ indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
try (XContentParser parser = XContentFactory.xContent(request.content()).createParser(request.content())) {
- // {
- // actions : [
- // { add : { index : "test1", alias : "alias1", filter : {"user" : "kimchy"} } }
- // { remove : { index : "test1", alias : "alias1" } }
- // ]
- // }
- indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
- XContentParser.Token token = parser.nextToken();
- if (token == null) {
- throw new IllegalArgumentException("No action is specified");
- }
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.START_ARRAY) {
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- if (token == XContentParser.Token.FIELD_NAME) {
- String action = parser.currentName();
- AliasAction.Type type;
- if ("add".equals(action)) {
- type = AliasAction.Type.ADD;
- } else if ("remove".equals(action)) {
- type = AliasAction.Type.REMOVE;
- } else {
- throw new IllegalArgumentException("Alias action [" + action + "] not supported");
- }
- String[] indices = null;
- String[] aliases = null;
- Map<String, Object> filter = null;
- String routing = null;
- boolean routingSet = false;
- String indexRouting = null;
- boolean indexRoutingSet = false;
- String searchRouting = null;
- boolean searchRoutingSet = false;
- String currentFieldName = null;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- currentFieldName = parser.currentName();
- } else if (token.isValue()) {
- if ("index".equals(currentFieldName)) {
- indices = new String[] { parser.text() };
- } else if ("alias".equals(currentFieldName)) {
- aliases = new String[] { parser.text() };
- } else if ("routing".equals(currentFieldName)) {
- routing = parser.textOrNull();
- routingSet = true;
- } else if ("indexRouting".equals(currentFieldName)
- || "index-routing".equals(currentFieldName) || "index_routing".equals(currentFieldName)) {
- indexRouting = parser.textOrNull();
- indexRoutingSet = true;
- } else if ("searchRouting".equals(currentFieldName)
- || "search-routing".equals(currentFieldName) || "search_routing".equals(currentFieldName)) {
- searchRouting = parser.textOrNull();
- searchRoutingSet = true;
- }
- } else if (token == XContentParser.Token.START_ARRAY) {
- if ("indices".equals(currentFieldName)) {
- List<String> indexNames = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- String index = parser.text();
- indexNames.add(index);
- }
- indices = indexNames.toArray(new String[indexNames.size()]);
- }
- if ("aliases".equals(currentFieldName)) {
- List<String> aliasNames = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- String alias = parser.text();
- aliasNames.add(alias);
- }
- aliases = aliasNames.toArray(new String[aliasNames.size()]);
- }
- } else if (token == XContentParser.Token.START_OBJECT) {
- if ("filter".equals(currentFieldName)) {
- filter = parser.mapOrdered();
- }
- }
- }
-
- if (type == AliasAction.Type.ADD) {
- AliasActions aliasActions = new AliasActions(type, indices, aliases).filter(filter);
- if (routingSet) {
- aliasActions.routing(routing);
- }
- if (indexRoutingSet) {
- aliasActions.indexRouting(indexRouting);
- }
- if (searchRoutingSet) {
- aliasActions.searchRouting(searchRouting);
- }
- indicesAliasesRequest.addAliasAction(aliasActions);
- } else if (type == AliasAction.Type.REMOVE) {
- indicesAliasesRequest.removeAlias(indices, aliases);
- }
- }
- }
- }
- }
+ PARSER.parse(parser, indicesAliasesRequest, () -> ParseFieldMatcher.STRICT);
+ }
+ if (indicesAliasesRequest.getAliasActions().isEmpty()) {
+ throw new IllegalArgumentException("No action specified");
}
client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<IndicesAliasesResponse>(channel));
}
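The hundred-line hand-rolled token loop is replaced by a declarative ObjectParser: the shape of the body is declared once, statically, and per-request parsing becomes a single call. The essential pattern, condensed from the handler's own declarations above:

// declare once: "actions" is an array of alias-action objects, each parsed
// by AliasActions.PARSER and appended to the request
static final ObjectParser<IndicesAliasesRequest, ParseFieldMatcherSupplier> PARSER =
        new ObjectParser<>("aliases");
static {
    PARSER.declareObjectArray((request, actions) -> actions.forEach(request::addAliasAction),
            AliasActions.PARSER, new ParseField("actions"));
}

// per request, the body is then parsed in one call with strict field matching:
//   PARSER.parse(parser, indicesAliasesRequest, () -> ParseFieldMatcher.STRICT);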
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java
index 398eb62c1f..3deba4c32f 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java
@@ -19,7 +19,6 @@
package org.elasticsearch.rest.action.admin.indices;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
-import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@@ -41,7 +40,6 @@ public class RestPutIndexTemplateAction extends BaseRestHandler {
controller.registerHandler(RestRequest.Method.POST, "/_template/{name}", this);
}
- @SuppressWarnings({"unchecked"})
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name"));
@@ -51,6 +49,6 @@ public class RestPutIndexTemplateAction extends BaseRestHandler {
putRequest.create(request.paramAsBoolean("create", false));
putRequest.cause(request.param("cause", ""));
putRequest.source(request.content());
- client.admin().indices().putTemplate(putRequest, new AcknowledgedRestListener<PutIndexTemplateResponse>(channel));
+ client.admin().indices().putTemplate(putRequest, new AcknowledgedRestListener<>(channel));
}
}
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java
index 791fc0eee3..3877715395 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java
@@ -46,7 +46,9 @@ public class RestTypesExistsAction extends BaseRestHandler {
@Inject
public RestTypesExistsAction(Settings settings, RestController controller) {
super(settings);
- controller.registerHandler(HEAD, "/{index}/{type}", this);
+ controller.registerWithDeprecatedHandler(
+ HEAD, "/{index}/_mapping/{type}", this,
+ HEAD, "/{index}/{type}", deprecationLogger);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
index 7649d59af9..c813878088 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
@@ -126,10 +126,10 @@ public class RestAllocationAction extends AbstractCatAction {
//if we don't know how much we use (non data nodes), it means 0
long used = 0;
short diskPercent = -1;
- if (total.bytes() > 0) {
- used = total.bytes() - avail.bytes();
- if (used >= 0 && avail.bytes() >= 0) {
- diskPercent = (short) (used * 100 / (used + avail.bytes()));
+ if (total.getBytes() > 0) {
+ used = total.getBytes() - avail.getBytes();
+ if (used >= 0 && avail.getBytes() >= 0) {
+ diskPercent = (short) (used * 100 / (used + avail.getBytes()));
}
}
@@ -137,8 +137,8 @@ public class RestAllocationAction extends AbstractCatAction {
table.addCell(shardCount);
table.addCell(nodeStats.getIndices().getStore().getSize());
table.addCell(used < 0 ? null : new ByteSizeValue(used));
- table.addCell(avail.bytes() < 0 ? null : avail);
- table.addCell(total.bytes() < 0 ? null : total);
+ table.addCell(avail.getBytes() < 0 ? null : avail);
+ table.addCell(total.getBytes() < 0 ? null : total);
table.addCell(diskPercent < 0 ? null : diskPercent);
table.addCell(node.getHostName());
table.addCell(node.getHostAddress());
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
index 3c65e32c74..782c0ea444 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
@@ -27,9 +27,10 @@ import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.client.Requests;
+import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -314,16 +315,32 @@ public class RestIndicesAction extends AbstractCatAction {
}
// package private for testing
- Table buildTable(RestRequest request, Index[] indices, ClusterHealthResponse health, IndicesStatsResponse stats, MetaData indexMetaDatas) {
+ Table buildTable(RestRequest request, Index[] indices, ClusterHealthResponse response, IndicesStatsResponse stats, MetaData indexMetaDatas) {
+ final String healthParam = request.param("health");
+ final ClusterHealthStatus status;
+ if (healthParam != null) {
+ status = ClusterHealthStatus.fromString(healthParam);
+ } else {
+ status = null;
+ }
+
Table table = getTableWithHeader(request);
for (final Index index : indices) {
final String indexName = index.getName();
- ClusterIndexHealth indexHealth = health.getIndices().get(indexName);
+ ClusterIndexHealth indexHealth = response.getIndices().get(indexName);
IndexStats indexStats = stats.getIndices().get(indexName);
IndexMetaData indexMetaData = indexMetaDatas.getIndices().get(indexName);
IndexMetaData.State state = indexMetaData.getState();
+ if (status != null) {
+ if (state == IndexMetaData.State.CLOSE ||
+ (indexHealth == null && !ClusterHealthStatus.RED.equals(status)) ||
+ (indexHealth != null && !indexHealth.getStatus().equals(status))) {
+ continue;
+ }
+ }
+
table.startRow();
table.addCell(state == IndexMetaData.State.OPEN ? (indexHealth == null ? "red*" : indexHealth.getStatus().toString().toLowerCase(Locale.ROOT)) : null);
table.addCell(state.toString().toLowerCase(Locale.ROOT));
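The filtering rule implied by the table code: a null ClusterIndexHealth is rendered as "red*", so under a health filter it should only match a requested status of red, and closed indices never match. Expressed as a standalone predicate (a sketch of the intended logic, not the handler itself):

// skip-or-keep decision for one row of _cat/indices?health=...
static boolean matchesHealth(ClusterHealthStatus requested, IndexMetaData.State state,
                             ClusterIndexHealth indexHealth) {
    if (requested == null) {
        return true;                                    // no filter given
    }
    if (state == IndexMetaData.State.CLOSE) {
        return false;                                   // closed rows never match
    }
    if (indexHealth == null) {
        return requested == ClusterHealthStatus.RED;    // the "red*" case
    }
    return indexHealth.getStatus() == requested;        // enum comparison
}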
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
index 54861f4e81..b0ab8db8b2 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
@@ -24,6 +24,8 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.Table;
import org.elasticsearch.common.inject.Inject;
@@ -147,14 +149,16 @@ public class RestRecoveryAction extends AbstractCatAction {
t.addCell(index);
t.addCell(state.getShardId().id());
t.addCell(new TimeValue(state.getTimer().time()));
- t.addCell(state.getType().toString().toLowerCase(Locale.ROOT));
+ t.addCell(state.getRecoverySource().getType().toString().toLowerCase(Locale.ROOT));
t.addCell(state.getStage().toString().toLowerCase(Locale.ROOT));
t.addCell(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName());
t.addCell(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName());
t.addCell(state.getTargetNode().getHostName());
t.addCell(state.getTargetNode().getName());
- t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getRepository());
- t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getSnapshotId().getName());
+ t.addCell(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? "n/a" :
+ ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getRepository());
+ t.addCell(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? "n/a" :
+ ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getSnapshotId().getName());
t.addCell(state.getIndex().totalRecoverFiles());
t.addCell(state.getIndex().recoveredFileCount());
t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent()));
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
index bcedf86436..2dd4b6a10d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
@@ -43,6 +43,8 @@ import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestActionListener;
import org.elasticsearch.rest.action.RestResponseListener;
+import java.util.Locale;
+
import static org.elasticsearch.rest.RestRequest.Method.GET;
public class RestShardsAction extends AbstractCatAction {
@@ -104,6 +106,8 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell("unassigned.for", "alias:uf;default:false;text-align:right;desc:time has been unassigned");
table.addCell("unassigned.details", "alias:ud;default:false;desc:additional details as to why the shard became unassigned");
+ table.addCell("recoverysource.type", "alias:rs;default:false;desc:recovery source type");
+
table.addCell("completion.size", "alias:cs,completionSize;default:false;text-align:right;desc:size of completion");
table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
@@ -240,6 +244,12 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell(null);
}
+ if (shard.recoverySource() != null) {
+ table.addCell(shard.recoverySource().getType().toString().toLowerCase(Locale.ROOT));
+ } else {
+ table.addCell(null);
+ }
+
table.addCell(commonStats == null ? null : commonStats.getCompletion().getSize());
table.addCell(commonStats == null ? null : commonStats.getFieldData().getMemorySize());
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java
index 78d30407ff..5fad57da66 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java
@@ -302,17 +302,17 @@ public class RestTable {
ByteSizeValue v = (ByteSizeValue) value;
String resolution = request.param("bytes");
if ("b".equals(resolution)) {
- return Long.toString(v.bytes());
+ return Long.toString(v.getBytes());
} else if ("k".equals(resolution) || "kb".equals(resolution)) {
- return Long.toString(v.kb());
+ return Long.toString(v.getKb());
} else if ("m".equals(resolution) || "mb".equals(resolution)) {
- return Long.toString(v.mb());
+ return Long.toString(v.getMb());
} else if ("g".equals(resolution) || "gb".equals(resolution)) {
- return Long.toString(v.gb());
+ return Long.toString(v.getGb());
} else if ("t".equals(resolution) || "tb".equals(resolution)) {
- return Long.toString(v.tb());
+ return Long.toString(v.getTb());
} else if ("p".equals(resolution) || "pb".equals(resolution)) {
- return Long.toString(v.pb());
+ return Long.toString(v.getPb());
} else {
return v.toString();
}
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java
new file mode 100644
index 0000000000..5ee92cbb76
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.action.RestResponseListener;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestTemplatesAction extends AbstractCatAction {
+ @Inject
+ public RestTemplatesAction(Settings settings, RestController controller) {
+ super(settings);
+ controller.registerHandler(GET, "/_cat/templates", this);
+ controller.registerHandler(GET, "/_cat/templates/{name}", this);
+ }
+
+ @Override
+ protected void documentation(StringBuilder sb) {
+ sb.append("/_cat/templates\n");
+ }
+
+ @Override
+ protected void doRequest(final RestRequest request, RestChannel channel, NodeClient client) {
+ final String matchPattern = request.hasParam("name") ? request.param("name") : null;
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.clear().metaData(true);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+
+ client.admin().cluster().state(clusterStateRequest, new RestResponseListener<ClusterStateResponse>(channel) {
+ @Override
+ public RestResponse buildResponse(ClusterStateResponse clusterStateResponse) throws Exception {
+ return RestTable.buildResponse(buildTable(request, clusterStateResponse, matchPattern), channel);
+ }
+ });
+ }
+
+ @Override
+ protected Table getTableWithHeader(RestRequest request) {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("name", "alias:n;desc:template name");
+ table.addCell("template", "alias:t;desc:template pattern string");
+ table.addCell("order", "alias:o;desc:template application order number");
+ table.addCell("version", "alias:v;desc:version");
+ table.endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest request, ClusterStateResponse clusterStateResponse, String patternString) {
+ Table table = getTableWithHeader(request);
+ MetaData metadata = clusterStateResponse.getState().metaData();
+ for (ObjectObjectCursor<String, IndexTemplateMetaData> entry : metadata.templates()) {
+ IndexTemplateMetaData indexData = entry.value;
+ if (patternString == null || Regex.simpleMatch(patternString, indexData.name())) {
+ table.startRow();
+ table.addCell(indexData.name());
+ table.addCell(indexData.getTemplate());
+ table.addCell(indexData.getOrder());
+ table.addCell(indexData.getVersion());
+ table.endRow();
+ }
+ }
+ return table;
+ }
+}
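For context, buildTable above filters templates by name: a null pattern matches everything, otherwise Regex.simpleMatch applies * wildcards. A self-contained sketch of that behavior, with simpleMatch re-implemented locally for a trailing-* pattern only (the real utility accepts * anywhere in the pattern):

import java.util.Arrays;
import java.util.List;

public class TemplateFilterDemo {
    // Local stand-in for Regex.simpleMatch, handling only a trailing "*".
    static boolean simpleMatch(String pattern, String value) {
        if (pattern.endsWith("*")) {
            return value.startsWith(pattern.substring(0, pattern.length() - 1));
        }
        return pattern.equals(value);
    }

    public static void main(String[] args) {
        List<String> templates = Arrays.asList("logs-template", "metrics-template");
        String pattern = "logs*"; // as supplied via /_cat/templates/{name}
        templates.stream()
                 .filter(name -> pattern == null || simpleMatch(pattern, name))
                 .forEach(System.out::println); // prints only logs-template
    }
}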
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index f5dca3f22c..4d5d3543cb 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -24,10 +24,12 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.support.ActiveShardCount;
-import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.client.Requests;
+import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
@@ -37,6 +39,7 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
@@ -52,6 +55,8 @@ import static org.elasticsearch.rest.RestStatus.OK;
* </pre>
*/
public class RestBulkAction extends BaseRestHandler {
+ private static final DeprecationLogger DEPRECATION_LOGGER =
+ new DeprecationLogger(Loggers.getLogger(RestBulkAction.class));
private final boolean allowExplicitIndex;
@@ -75,18 +80,21 @@ public class RestBulkAction extends BaseRestHandler {
String defaultIndex = request.param("index");
String defaultType = request.param("type");
String defaultRouting = request.param("routing");
+ FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
String fieldsParam = request.param("fields");
- String defaultPipeline = request.param("pipeline");
+ if (fieldsParam != null) {
+ DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
+ }
String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;
-
+ String defaultPipeline = request.param("pipeline");
String waitForActiveShards = request.param("wait_for_active_shards");
if (waitForActiveShards != null) {
bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
- bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline,
- null, allowExplicitIndex);
+ bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields,
+ defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex);
client.bulk(bulkRequest, new RestBuilderListener<BulkResponse>(channel) {
@Override
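The bulk handler keeps the legacy fields parameter working but routes a warning through the new DEPRECATION_LOGGER before falling back to it; _source is parsed into a fetch-source context instead. A hedged sketch of that accept-but-warn flow, with the Elasticsearch logger and request types replaced by plain stand-ins:

import java.util.Collections;
import java.util.Map;

public class BulkFieldsDemo {
    // Stand-in for DeprecationLogger.deprecated(...).
    static void deprecated(String message) {
        System.err.println("WARN (deprecation) " + message);
    }

    static String[] parseDefaultFields(Map<String, String> params) {
        String fieldsParam = params.get("fields");
        if (fieldsParam != null) {
            deprecated("Deprecated field [fields] used, expected [_source] instead");
            return fieldsParam.split(",");
        }
        return null; // callers rely on the parsed _source fetch context instead
    }

    public static void main(String[] args) {
        Map<String, String> params = Collections.singletonMap("fields", "user,title");
        System.out.println(String.join("|", parseDefaultFields(params))); // user|title
    }
}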
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
index 8c782a8d12..550abb3e3b 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
@@ -58,13 +58,15 @@ public class RestGetAction extends BaseRestHandler {
getRequest.parent(request.param("parent"));
getRequest.preference(request.param("preference"));
getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
- getRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false));
-
- String sField = request.param("fields");
+ if (request.param("fields") != null) {
+ throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
+ "please use [stored_fields] to retrieve stored fields or or [_source] to load the field from _source");
+ }
+ String sField = request.param("stored_fields");
if (sField != null) {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
- getRequest.fields(sFields);
+ getRequest.storedFields(sFields);
}
}
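Unlike bulk, the get handler drops the legacy parameter outright: fields now fails fast and stored_fields takes its place. The contract reduces to a small stand-alone sketch:

import java.util.HashMap;
import java.util.Map;

public class GetFieldsDemo {
    static String[] storedFields(Map<String, String> params) {
        if (params.get("fields") != null) {
            throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
                    "please use [stored_fields] to retrieve stored fields or [_source] to load the field from _source");
        }
        String sField = params.get("stored_fields");
        return sField == null ? null : sField.split(",");
    }

    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("stored_fields", "title,date");
        System.out.println(String.join(",", storedFields(params))); // title,date

        params.put("fields", "title"); // the removed parameter is rejected
        try {
            storedFields(params);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}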
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java
index 9fb706bd8e..ad2f826e58 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java
@@ -91,7 +91,7 @@ public abstract class RestHeadAction extends BaseRestHandler {
getRequest.preference(request.param("preference"));
getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
// don't get any fields back...
- getRequest.fields(Strings.EMPTY_ARRAY);
+ getRequest.storedFields(Strings.EMPTY_ARRAY);
// TODO we can also just return the document size as Content-Length
client.get(getRequest, new RestResponseListener<GetResponse>(channel) {
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java
index 995c43059d..07d221fed8 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java
@@ -59,10 +59,12 @@ public class RestMultiGetAction extends BaseRestHandler {
multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh()));
multiGetRequest.preference(request.param("preference"));
multiGetRequest.realtime(request.paramAsBoolean("realtime", multiGetRequest.realtime()));
- multiGetRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false));
-
+ if (request.param("fields") != null) {
+ throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
+ "please use [stored_fields] to retrieve stored fields or _source filtering if the field is not stored");
+ }
String[] sFields = null;
- String sField = request.param("fields");
+ String sField = request.param("stored_fields");
if (sField != null) {
sFields = Strings.splitStringByCommaToArray(sField);
}
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
index d0d7916adf..91f71e7249 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
@@ -25,6 +25,8 @@ import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.BaseRestHandler;
@@ -33,12 +35,15 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.rest.action.RestStatusToXContentListener;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import static org.elasticsearch.rest.RestRequest.Method.POST;
/**
*/
public class RestUpdateAction extends BaseRestHandler {
+ private static final DeprecationLogger DEPRECATION_LOGGER =
+ new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));
@Inject
public RestUpdateAction(Settings settings, RestController controller) {
@@ -58,13 +63,19 @@ public class RestUpdateAction extends BaseRestHandler {
updateRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
updateRequest.docAsUpsert(request.paramAsBoolean("doc_as_upsert", updateRequest.docAsUpsert()));
+ FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
String sField = request.param("fields");
+ if (sField != null && fetchSourceContext != null) {
+ throw new IllegalArgumentException("[fields] and [_source] cannot be used in the same request");
+ }
if (sField != null) {
+ DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
String[] sFields = Strings.splitStringByCommaToArray(sField);
- if (sFields != null) {
- updateRequest.fields(sFields);
- }
+ updateRequest.fields(sFields);
+ } else if (fetchSourceContext != null) {
+ updateRequest.fetchSource(fetchSourceContext);
}
+
updateRequest.retryOnConflict(request.paramAsInt("retry_on_conflict", updateRequest.retryOnConflict()));
updateRequest.version(RestActions.parseVersion(request));
updateRequest.versionType(VersionType.fromString(request.param("version_type"), updateRequest.versionType()));
@@ -72,7 +83,7 @@ public class RestUpdateAction extends BaseRestHandler {
// see if we have it in the body
if (request.hasContent()) {
- updateRequest.source(request.content());
+ updateRequest.fromXContent(request.content());
IndexRequest upsertRequest = updateRequest.upsertRequest();
if (upsertRequest != null) {
upsertRequest.routing(request.param("routing"));
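The update handler lands between the two approaches: fields still works behind a deprecation warning, _source is the replacement, and supplying both is an error. A sketch of that three-way branch, with request parsing reduced to two nullable inputs:

public class UpdateFieldsDemo {
    static String describe(String fieldsParam, String fetchSourceParam) {
        if (fieldsParam != null && fetchSourceParam != null) {
            throw new IllegalArgumentException("[fields] and [_source] cannot be used in the same request");
        }
        if (fieldsParam != null) {
            System.err.println("WARN (deprecation) Deprecated field [fields] used, expected [_source] instead");
            return "stored fields: " + fieldsParam;
        }
        if (fetchSourceParam != null) {
            return "fetch source: " + fetchSourceParam;
        }
        return "no field fetching requested";
    }

    public static void main(String[] args) {
        System.out.println(describe("counter", null));   // deprecated path
        System.out.println(describe(null, "obj.*"));     // replacement path
        try {
            describe("counter", "obj.*");                // both: rejected
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}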
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java
index 7088b96c6d..597bf3db61 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java
@@ -78,11 +78,15 @@ public class RestExplainAction extends BaseRestHandler {
explainRequest.query(query);
}
- String sField = request.param("fields");
+ if (request.param("fields") != null) {
+ throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
+ "please use [stored_fields] to retrieve stored fields");
+ }
+ String sField = request.param("stored_fields");
if (sField != null) {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
- explainRequest.fields(sFields);
+ explainRequest.storedFields(sFields);
}
}
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
index ae320bccac..a54e40be73 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
@@ -19,10 +19,6 @@
package org.elasticsearch.rest.action.search;
-import java.io.IOException;
-import java.util.Map;
-import java.util.function.BiConsumer;
-
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
@@ -40,12 +36,16 @@ import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
-import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.search.builder.SearchSourceBuilder;
+import java.io.IOException;
+import java.util.Map;
+import java.util.function.BiConsumer;
+
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
@@ -97,7 +97,7 @@ public class RestMultiSearchAction extends BaseRestHandler {
final QueryParseContext queryParseContext = new QueryParseContext(searchRequestParsers.queryParsers,
requestParser, parseFieldMatcher);
searchRequest.source(SearchSourceBuilder.fromXContent(queryParseContext,
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters));
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers));
multiRequest.add(searchRequest);
} catch (IOException e) {
throw new ElasticsearchParseException("Exception when parsing search request", e);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index f75eccbd30..8acfc72dfe 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -24,7 +24,6 @@ import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
@@ -33,7 +32,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
@@ -42,13 +40,12 @@ import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.rest.action.RestStatusToXContentListener;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchRequestParsers;
-import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.SuggestBuilder;
-import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.SuggestMode;
import java.io.IOException;
@@ -105,7 +102,8 @@ public class RestSearchAction extends BaseRestHandler {
if (restContent != null) {
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
QueryParseContext context = new QueryParseContext(searchRequestParsers.queryParsers, parser, parseFieldMatcher);
- searchRequest.source().parseXContent(context, searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequest.source().parseXContent(context, searchRequestParsers.aggParsers, searchRequestParsers.suggesters,
+ searchRequestParsers.searchExtParsers);
}
}
@@ -178,18 +176,11 @@ public class RestSearchAction extends BaseRestHandler {
"if the field is not stored");
}
- String sField = request.param("stored_fields");
- if (sField != null) {
- if (!Strings.hasText(sField)) {
- searchSourceBuilder.noStoredFields();
- } else {
- String[] sFields = Strings.splitStringByCommaToArray(sField);
- if (sFields != null) {
- for (String field : sFields) {
- searchSourceBuilder.storedField(field);
- }
- }
- }
+
+ StoredFieldsContext storedFieldsContext =
+ StoredFieldsContext.fromRestRequest(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), request);
+ if (storedFieldsContext != null) {
+ searchSourceBuilder.storedFields(storedFieldsContext);
}
String sDocValueFields = request.param("docvalue_fields");
if (sDocValueFields == null) {
diff --git a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
index 191c2b4bcf..69c8b92ba1 100644
--- a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
+++ b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
@@ -72,10 +72,10 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri
@Override
public SearchScript search(CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
final NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript.compiled();
+ final AbstractSearchScript script = (AbstractSearchScript) scriptFactory.newScript(vars);
return new SearchScript() {
@Override
public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
- AbstractSearchScript script = (AbstractSearchScript) scriptFactory.newScript(vars);
script.setLookup(lookup.getLeafSearchLookup(context));
return script;
}
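The one-line hoist above changes how often the native script factory runs: the script instance is created once when the SearchScript is built, and each per-segment call now only re-binds the leaf lookup instead of constructing a fresh script. A toy illustration of that difference (all types here are stand-ins):

import java.util.concurrent.atomic.AtomicInteger;

public class HoistDemo {
    interface LeafScript { void setSegment(int ord); }

    static final AtomicInteger CREATED = new AtomicInteger();

    static LeafScript newScript() {
        CREATED.incrementAndGet();
        return ord -> { /* bind per-segment state here */ };
    }

    public static void main(String[] args) {
        LeafScript script = newScript();        // created once, as in the patched code
        for (int segment = 0; segment < 3; segment++) {
            script.setSegment(segment);         // re-bound per segment, not re-created
        }
        System.out.println("instances created: " + CREATED.get()); // 1
    }
}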
diff --git a/core/src/main/java/org/elasticsearch/script/Script.java b/core/src/main/java/org/elasticsearch/script/Script.java
index cedae963ca..94abb43bc0 100644
--- a/core/src/main/java/org/elasticsearch/script/Script.java
+++ b/core/src/main/java/org/elasticsearch/script/Script.java
@@ -44,6 +44,7 @@ import java.util.Objects;
public final class Script implements ToXContent, Writeable {
public static final ScriptType DEFAULT_TYPE = ScriptType.INLINE;
+ public static final String DEFAULT_SCRIPT_LANG = "painless";
private String script;
private ScriptType type;
@@ -60,7 +61,7 @@ public final class Script implements ToXContent, Writeable {
this(script, ScriptType.INLINE, null, null);
}
- public Script(String script, ScriptType type, @Nullable String lang, @Nullable Map<String, ?> params) {
+ public Script(String script, ScriptType type, String lang, @Nullable Map<String, ?> params) {
this(script, type, lang, params, null);
}
@@ -78,14 +79,14 @@ public final class Script implements ToXContent, Writeable {
* when serializing the script back to xcontent.
*/
@SuppressWarnings("unchecked")
- public Script(String script, ScriptType type, @Nullable String lang, @Nullable Map<String, ?> params,
+ public Script(String script, ScriptType type, String lang, @Nullable Map<String, ?> params,
@Nullable XContentType contentType) {
if (contentType != null && type != ScriptType.INLINE) {
throw new IllegalArgumentException("The parameter contentType only makes sense for inline scripts");
}
this.script = Objects.requireNonNull(script);
this.type = Objects.requireNonNull(type);
- this.lang = lang;
+ this.lang = lang == null ? DEFAULT_SCRIPT_LANG : lang;
this.params = (Map<String, Object>) params;
this.contentType = contentType;
}
@@ -135,7 +136,7 @@ public final class Script implements ToXContent, Writeable {
* @return The type of script -- inline, stored, or file.
*/
public ScriptType getType() {
- return type == null ? DEFAULT_TYPE : type;
+ return type;
}
/**
@@ -196,7 +197,7 @@ public final class Script implements ToXContent, Writeable {
token = parser.nextToken();
}
if (token == XContentParser.Token.VALUE_STRING) {
- return new Script(parser.text());
+ return new Script(parser.text(), ScriptType.INLINE, lang, null);
}
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("expected a string value or an object, but found [{}] instead", token);
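After this change a Script never carries a null lang internally: the constructor substitutes the new DEFAULT_SCRIPT_LANG ("painless"), and the shorthand string form parsed above becomes an explicit inline script. The fallback in isolation (Script reduced to a stand-in holder):

public class ScriptLangDemo {
    static final String DEFAULT_SCRIPT_LANG = "painless";

    static final class Script {
        final String source;
        final String lang;

        Script(String source, String lang) {
            this.source = source;
            this.lang = lang == null ? DEFAULT_SCRIPT_LANG : lang; // mirrors the constructor change
        }
    }

    public static void main(String[] args) {
        // "script": "doc['f'].value" now resolves its language eagerly.
        System.out.println(new Script("doc['f'].value", null).lang); // painless
        System.out.println(new Script("1 + 1", "expression").lang);  // expression
    }
}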
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java
index d8c2b622d1..979bffb4bc 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java
@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -65,7 +66,7 @@ public final class ScriptMetaData implements MetaData.Custom {
if (scriptAsBytes == null) {
return null;
}
- return parseStoredScript(scriptAsBytes);
+ return scriptAsBytes.utf8ToString();
}
public static String parseStoredScript(BytesReference scriptAsBytes) {
@@ -78,6 +79,9 @@ public final class ScriptMetaData implements MetaData.Custom {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) {
parser.nextToken();
parser.nextToken();
+ if (parser.currentToken() == Token.END_OBJECT) {
+ throw new IllegalArgumentException("Empty script");
+ }
switch (parser.currentName()) {
case "script":
case "template":
@@ -115,10 +119,8 @@ public final class ScriptMetaData implements MetaData.Custom {
case FIELD_NAME:
key = parser.currentName();
break;
- case START_OBJECT:
- XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent());
- contentBuilder.copyCurrentStructure(parser);
- scripts.put(key, new ScriptAsBytes(contentBuilder.bytes()));
+ case VALUE_STRING:
+ scripts.put(key, new ScriptAsBytes(new BytesArray(parser.text())));
break;
default:
throw new ParsingException(parser.getTokenLocation(), "Unexpected token [" + token + "]");
@@ -147,7 +149,7 @@ public final class ScriptMetaData implements MetaData.Custom {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
for (Map.Entry<String, ScriptAsBytes> entry : scripts.entrySet()) {
- builder.rawField(entry.getKey(), entry.getValue().script);
+ builder.field(entry.getKey(), entry.getValue().script.utf8ToString());
}
return builder;
}
@@ -188,8 +190,8 @@ public final class ScriptMetaData implements MetaData.Custom {
@Override
public String toString() {
return "ScriptMetaData{" +
- "scripts=" + scripts +
- '}';
+ "scripts=" + scripts +
+ '}';
}
static String toKey(String language, String id) {
@@ -216,7 +218,8 @@ public final class ScriptMetaData implements MetaData.Custom {
}
public Builder storeScript(String lang, String id, BytesReference script) {
- scripts.put(toKey(lang, id), new ScriptAsBytes(script));
+ BytesReference scriptBytes = new BytesArray(parseStoredScript(script));
+ scripts.put(toKey(lang, id), new ScriptAsBytes(scriptBytes));
return this;
}
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java
index 793c87077e..9e61f39378 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptService.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java
@@ -19,6 +19,8 @@
package org.elasticsearch.script;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
@@ -90,8 +92,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
public static final Setting<Integer> SCRIPT_MAX_COMPILATIONS_PER_MINUTE =
Setting.intSetting("script.max_compilations_per_minute", 15, 0, Property.Dynamic, Property.NodeScope);
- private final String defaultLang;
-
private final Collection<ScriptEngineService> scriptEngines;
private final Map<String, ScriptEngineService> scriptEnginesByLang;
private final Map<String, ScriptEngineService> scriptEnginesByExt;
@@ -129,8 +129,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
this.scriptContextRegistry = scriptContextRegistry;
int cacheMaxSize = SCRIPT_CACHE_SIZE_SETTING.get(settings);
- this.defaultLang = scriptSettings.getDefaultScriptLanguageSetting().get(settings);
-
CacheBuilder<CacheKey, CompiledScript> cacheBuilder = CacheBuilder.builder();
if (cacheMaxSize >= 0) {
cacheBuilder.setMaximumWeight(cacheMaxSize);
@@ -220,11 +218,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
}
String lang = script.getLang();
-
- if (lang == null) {
- lang = defaultLang;
- }
-
ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang);
if (canExecuteScript(lang, script.getType(), scriptContext) == false) {
throw new IllegalStateException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled");
@@ -283,7 +276,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
throw new IllegalArgumentException("The parameter script (Script) must not be null.");
}
- String lang = script.getLang() == null ? defaultLang : script.getLang();
+ String lang = script.getLang();
ScriptType type = script.getType();
//script.getScript() could return either a name or code for a script,
//but we check for a file script name first and an indexed script name second
@@ -362,9 +355,8 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
}
private String validateScriptLanguage(String scriptLang) {
- if (scriptLang == null) {
- scriptLang = defaultLang;
- } else if (scriptEnginesByLang.containsKey(scriptLang) == false) {
+ Objects.requireNonNull(scriptLang);
+ if (scriptEnginesByLang.containsKey(scriptLang) == false) {
throw new IllegalArgumentException("script_lang not supported [" + scriptLang + "]");
}
return scriptLang;
@@ -527,8 +519,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
"Limit of script size in bytes [{}] has been exceeded for script [{}] with size [{}]",
allowedScriptSizeInBytes,
identifier,
- scriptSizeInBytes
- );
+ scriptSizeInBytes);
throw new IllegalArgumentException(message);
}
}
@@ -605,7 +596,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath());
}
} catch (Exception e) {
- logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to load/compile script [{}]", scriptNameExt.v1()), e);
}
}
}
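The logging change above is the standard log4j2 migration idiom: the formatted message is wrapped in a Supplier so it is only built when WARN is enabled, and the Throwable moves to the last argument. The same call in a runnable form (log4j-api on the classpath; the cast disambiguates the overload, exactly as in the patch; the script name is hypothetical):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyWarnDemo {
    private static final Logger logger = LogManager.getLogger(LazyWarnDemo.class);

    public static void main(String[] args) {
        Exception e = new IllegalStateException("boom");
        String scriptName = "my_script"; // hypothetical script name
        logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                "failed to load/compile script [{}]", scriptName), e);
    }
}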
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java
index e315f8d816..1cb2b35624 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java
@@ -32,7 +32,16 @@ import java.util.function.Function;
public class ScriptSettings {
- public static final String DEFAULT_LANG = "groovy";
+ static final String LEGACY_DEFAULT_LANG = "groovy";
+
+ /**
+ * The default script language to use for scripts stored in documents that have no script lang set explicitly.
+ * This is a legacy setting that only applies to indices created on ES versions prior to 5.0.
+ *
+ * This constant will be removed in the next major release.
+ */
+ @Deprecated
+ public static final String LEGACY_SCRIPT_SETTING = "script.legacy.default_lang";
private static final Map<ScriptService.ScriptType, Setting<Boolean>> SCRIPT_TYPE_SETTING_MAP;
@@ -49,7 +58,7 @@ public class ScriptSettings {
private final Map<ScriptContext, Setting<Boolean>> scriptContextSettingMap;
private final List<Setting<Boolean>> scriptLanguageSettings;
- private final Setting<String> defaultScriptLanguageSetting;
+ private final Setting<String> defaultLegacyScriptLanguageSetting;
public ScriptSettings(ScriptEngineRegistry scriptEngineRegistry, ScriptContextRegistry scriptContextRegistry) {
Map<ScriptContext, Setting<Boolean>> scriptContextSettingMap = contextSettings(scriptContextRegistry);
@@ -58,8 +67,8 @@ public class ScriptSettings {
List<Setting<Boolean>> scriptLanguageSettings = languageSettings(SCRIPT_TYPE_SETTING_MAP, scriptContextSettingMap, scriptEngineRegistry, scriptContextRegistry);
this.scriptLanguageSettings = Collections.unmodifiableList(scriptLanguageSettings);
- this.defaultScriptLanguageSetting = new Setting<>("script.default_lang", DEFAULT_LANG, setting -> {
- if (!"groovy".equals(setting) && !scriptEngineRegistry.getRegisteredLanguages().containsKey(setting)) {
+ this.defaultLegacyScriptLanguageSetting = new Setting<>(LEGACY_SCRIPT_SETTING, LEGACY_DEFAULT_LANG, setting -> {
+ if (!LEGACY_DEFAULT_LANG.equals(setting) && !scriptEngineRegistry.getRegisteredLanguages().containsKey(setting)) {
throw new IllegalArgumentException("unregistered default language [" + setting + "]");
}
return setting;
@@ -97,9 +106,25 @@ public class ScriptSettings {
}
final boolean defaultIfNothingSet = defaultLangAndType;
+ Function<Settings, String> defaultLangAndTypeFn = settings -> {
+ final Setting<Boolean> globalTypeSetting = scriptTypeSettingMap.get(scriptType);
+ final Setting<Boolean> langAndTypeSetting = Setting.boolSetting(ScriptModes.getGlobalKey(language, scriptType),
+ defaultIfNothingSet, Property.NodeScope);
+
+ if (langAndTypeSetting.exists(settings)) {
+ // fine-grained e.g. script.engine.groovy.inline
+ return langAndTypeSetting.get(settings).toString();
+ } else if (globalTypeSetting.exists(settings)) {
+ // global type - script.inline
+ return globalTypeSetting.get(settings).toString();
+ } else {
+ return Boolean.toString(defaultIfNothingSet);
+ }
+ };
+
// Setting for something like "script.engine.groovy.inline"
final Setting<Boolean> langAndTypeSetting = Setting.boolSetting(ScriptModes.getGlobalKey(language, scriptType),
- defaultLangAndType, Property.NodeScope);
+ defaultLangAndTypeFn, Property.NodeScope);
scriptModeSettings.add(langAndTypeSetting);
for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) {
@@ -144,7 +169,7 @@ public class ScriptSettings {
settings.addAll(SCRIPT_TYPE_SETTING_MAP.values());
settings.addAll(scriptContextSettingMap.values());
settings.addAll(scriptLanguageSettings);
- settings.add(defaultScriptLanguageSetting);
+ settings.add(defaultLegacyScriptLanguageSetting);
return settings;
}
@@ -152,7 +177,11 @@ public class ScriptSettings {
return scriptLanguageSettings;
}
- public Setting<String> getDefaultScriptLanguageSetting() {
- return defaultScriptLanguageSetting;
+ public Setting<String> getDefaultLegacyScriptLanguageSetting() {
+ return defaultLegacyScriptLanguageSetting;
+ }
+
+ public static String getLegacyDefaultLang(Settings settings) {
+ return settings.get(LEGACY_SCRIPT_SETTING, ScriptSettings.LEGACY_DEFAULT_LANG);
}
}
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptStats.java b/core/src/main/java/org/elasticsearch/script/ScriptStats.java
index c08d220d57..33f5dc2187 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptStats.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptStats.java
@@ -21,39 +21,22 @@ package org.elasticsearch.script;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-public class ScriptStats implements Streamable, ToXContent {
- private long compilations;
- private long cacheEvictions;
-
- public ScriptStats() {
- }
+public class ScriptStats implements Writeable, ToXContent {
+ private final long compilations;
+ private final long cacheEvictions;
public ScriptStats(long compilations, long cacheEvictions) {
this.compilations = compilations;
this.cacheEvictions = cacheEvictions;
}
- public void add(ScriptStats stats) {
- this.compilations += stats.compilations;
- this.cacheEvictions += stats.cacheEvictions;
- }
-
- public long getCompilations() {
- return compilations;
- }
-
- public long getCacheEvictions() {
- return cacheEvictions;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public ScriptStats(StreamInput in) throws IOException {
compilations = in.readVLong();
cacheEvictions = in.readVLong();
}
@@ -64,6 +47,14 @@ public class ScriptStats implements Streamable, ToXContent {
out.writeVLong(cacheEvictions);
}
+ public long getCompilations() {
+ return compilations;
+ }
+
+ public long getCacheEvictions() {
+ return cacheEvictions;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.SCRIPT_STATS);
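The ScriptStats rewrite above follows the project-wide Streamable-to-Writeable migration: fields become final and deserialization moves into a constructor that reads from the stream, mirroring writeTo. The core of the pattern with the Elasticsearch stream types swapped for plain java.io stand-ins (the real code uses readVLong/writeVLong):

import java.io.*;

public class StatsDemo {
    static final class Stats {
        private final long compilations;
        private final long cacheEvictions;

        Stats(long compilations, long cacheEvictions) {
            this.compilations = compilations;
            this.cacheEvictions = cacheEvictions;
        }

        // Read-side constructor replaces the old mutable readFrom(StreamInput).
        Stats(DataInput in) throws IOException {
            this.compilations = in.readLong();
            this.cacheEvictions = in.readLong();
        }

        void writeTo(DataOutput out) throws IOException {
            out.writeLong(compilations);
            out.writeLong(cacheEvictions);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new Stats(42, 7).writeTo(new DataOutputStream(bytes));
        Stats copy = new Stats(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.compilations + " " + copy.cacheEvictions); // 42 7
    }
}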
diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
index 131849ce3e..9aab786aa3 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
@@ -17,7 +17,7 @@
* under the License.
*/
-package org.elasticsearch.search.internal;
+package org.elasticsearch.search;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause.Occur;
@@ -39,7 +39,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
@@ -53,16 +52,19 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSearchResult;
-import org.elasticsearch.search.fetch.FetchSubPhase;
-import org.elasticsearch.search.fetch.FetchSubPhaseContext;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
+import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.ScrollContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
@@ -79,10 +81,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-/**
- *
- */
-public class DefaultSearchContext extends SearchContext {
+final class DefaultSearchContext extends SearchContext {
private final long id;
private final ShardSearchRequest request;
@@ -106,9 +105,10 @@ public class DefaultSearchContext extends SearchContext {
private ScrollContext scrollContext;
private boolean explain;
private boolean version = false; // by default, we don't return versions
- private List<String> fieldNames;
+ private StoredFieldsContext storedFields;
private ScriptFieldsContext scriptFields;
private FetchSourceContext fetchSourceContext;
+ private DocValueFieldsContext docValueFieldsContext;
private int from = -1;
private int size = -1;
private SortAndFormats sort;
@@ -124,10 +124,7 @@ public class DefaultSearchContext extends SearchContext {
* things like the type filter or alias filters.
*/
private ParsedQuery originalQuery;
- /**
- * Just like originalQuery but with the filters from types, aliases and slice applied.
- */
- private ParsedQuery filteredQuery;
+
/**
* The query to actually execute.
*/
@@ -147,12 +144,12 @@ public class DefaultSearchContext extends SearchContext {
private volatile long lastAccessTime = -1;
private Profilers profilers;
- private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>();
+ private final Map<String, SearchExtBuilder> searchExtBuilders = new HashMap<>();
private final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
private final QueryShardContext queryShardContext;
private FetchPhase fetchPhase;
- public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
+ DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
IndexService indexService, IndexShard indexShard, ScriptService scriptService,
BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout,
FetchPhase fetchPhase) {
@@ -188,7 +185,7 @@ public class DefaultSearchContext extends SearchContext {
* Should be called before executing the main query and after all other parameters have been set.
*/
@Override
- public void preProcess() {
+ public void preProcess(boolean rewrite) {
if (hasOnlySuggest() ) {
return;
}
@@ -242,20 +239,22 @@ public class DefaultSearchContext extends SearchContext {
if (queryBoost() != AbstractQueryBuilder.DEFAULT_BOOST) {
parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new WeightFactorFunction(queryBoost)), parsedQuery()));
}
- filteredQuery(buildFilteredQuery());
- try {
- this.query = searcher().rewrite(this.query);
- } catch (IOException e) {
- throw new QueryPhaseExecutionException(this, "Failed to rewrite main query", e);
+ this.query = buildFilteredQuery();
+ if (rewrite) {
+ try {
+ this.query = searcher.rewrite(query);
+ } catch (IOException e) {
+ throw new QueryPhaseExecutionException(this, "Failed to rewrite main query", e);
+ }
}
}
- private ParsedQuery buildFilteredQuery() {
- Query searchFilter = searchFilter(queryShardContext.getTypes());
+ private Query buildFilteredQuery() {
+ final Query searchFilter = searchFilter(queryShardContext.getTypes());
if (searchFilter == null) {
- return originalQuery;
+ return originalQuery.query();
}
- Query result;
+ final Query result;
if (Queries.isConstantMatchAllQuery(query())) {
result = new ConstantScoreQuery(searchFilter);
} else {
@@ -264,7 +263,7 @@ public class DefaultSearchContext extends SearchContext {
.add(searchFilter, Occur.FILTER)
.build();
}
- return new ParsedQuery(result, originalQuery);
+ return result;
}
@Override
@@ -387,14 +386,16 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
- public <SubPhaseContext extends FetchSubPhaseContext> SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory<SubPhaseContext> contextFactory) {
- String subPhaseName = contextFactory.getName();
- if (subPhaseContexts.get(subPhaseName) == null) {
- subPhaseContexts.put(subPhaseName, contextFactory.newContextInstance());
- }
- return (SubPhaseContext) subPhaseContexts.get(subPhaseName);
+ public void addSearchExt(SearchExtBuilder searchExtBuilder) {
+ //it's ok to use the writeable name here given that we enforce it to be the same as the name of the element that gets
+ //parsed by the corresponding parser. There is one single name and one single way to retrieve the parsed object from the context.
+ searchExtBuilders.put(searchExtBuilder.getWriteableName(), searchExtBuilder);
}
+ @Override
+ public SearchExtBuilder getSearchExt(String name) {
+ return searchExtBuilders.get(name);
+ }
@Override
public SearchContextHighlight highlight() {
@@ -470,6 +471,17 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
+ public DocValueFieldsContext docValueFieldsContext() {
+ return docValueFieldsContext;
+ }
+
+ @Override
+ public SearchContext docValueFieldsContext(DocValueFieldsContext docValueFieldsContext) {
+ this.docValueFieldsContext = docValueFieldsContext;
+ return this;
+ }
+
+ @Override
public ContextIndexSearcher searcher() {
return this.searcher;
}
@@ -485,11 +497,6 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
- public AnalysisService analysisService() {
- return indexService.analysisService();
- }
-
- @Override
public SimilarityService similarityService() {
return indexService.similarityService();
}
@@ -606,15 +613,6 @@ public class DefaultSearchContext extends SearchContext {
return this;
}
- public ParsedQuery filteredQuery() {
- return filteredQuery;
- }
-
- private void filteredQuery(ParsedQuery filteredQuery) {
- this.filteredQuery = filteredQuery;
- this.query = filteredQuery.query();
- }
-
@Override
public ParsedQuery parsedQuery() {
return this.originalQuery;
@@ -651,21 +649,29 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
- public boolean hasFieldNames() {
- return fieldNames != null;
+ public boolean hasStoredFields() {
+ return storedFields != null && storedFields.fieldNames() != null;
}
@Override
- public List<String> fieldNames() {
- if (fieldNames == null) {
- fieldNames = new ArrayList<>();
- }
- return fieldNames;
+ public boolean hasStoredFieldsContext() {
+ return storedFields != null;
+ }
+
+ @Override
+ public StoredFieldsContext storedFieldsContext() {
+ return storedFields;
+ }
+
+ @Override
+ public SearchContext storedFieldsContext(StoredFieldsContext storedFieldsContext) {
+ this.storedFields = storedFieldsContext;
+ return this;
}
@Override
- public void emptyFieldNames() {
- this.fieldNames = Collections.emptyList();
+ public boolean storedFieldsRequested() {
+ return storedFields == null || storedFields.fetchFields();
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/search/SearchExtBuilder.java b/core/src/main/java/org/elasticsearch/search/SearchExtBuilder.java
new file mode 100644
index 0000000000..8d75216fe1
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/SearchExtBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.plugins.SearchPlugin.SearchExtSpec;
+
+/**
+ * Intermediate serializable representation of a search ext section. To be subclassed by plugins that support
+ * a custom section as part of a search request, which will be provided within the ext element.
+ * Any state needs to be serialized as part of the {@link Writeable#writeTo(StreamOutput)} method and
+ * read from the incoming stream, usually done by adding a constructor that takes {@link StreamInput} as
+ * an argument.
+ *
+ * Registration happens through {@link SearchPlugin#getSearchExts()}, which also needs a {@link SearchExtParser} that's able to parse
+ * the incoming request from the REST layer into the proper {@link SearchExtBuilder} subclass.
+ *
+ * {@link #getWriteableName()} must return the same name as the one used for the registration
+ * of the {@link SearchExtSpec}.
+ *
+ * @see SearchExtParser
+ * @see SearchExtSpec
+ */
+public abstract class SearchExtBuilder implements NamedWriteable, ToXContent {
+
+ public abstract int hashCode();
+
+ public abstract boolean equals(Object obj);
+}
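A hedged sketch of the subclassing contract this javadoc describes, with the NamedWriteable/ToXContent plumbing reduced to a one-method stand-in (the name "my_ext" and the builder class are illustrative, not part of the patch):

import java.util.Objects;

public class SearchExtDemo {
    interface NamedWriteable { String getWriteableName(); } // stand-in

    // A plugin-defined section parsed from { "ext": { "my_ext": ... } }.
    static final class MyExtBuilder implements NamedWriteable {
        static final String NAME = "my_ext";
        final String payload;

        MyExtBuilder(String payload) { this.payload = payload; }

        @Override
        public String getWriteableName() {
            return NAME; // must match the name used to register the SearchExtSpec
        }

        @Override
        public int hashCode() { return Objects.hash(payload); }

        @Override
        public boolean equals(Object obj) {
            return obj instanceof MyExtBuilder && ((MyExtBuilder) obj).payload.equals(payload);
        }
    }

    public static void main(String[] args) {
        MyExtBuilder ext = new MyExtBuilder("hello");
        System.out.println(ext.getWriteableName() + " -> " + ext.payload);
    }
}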
diff --git a/core/src/main/java/org/elasticsearch/search/SearchExtParser.java b/core/src/main/java/org/elasticsearch/search/SearchExtParser.java
new file mode 100644
index 0000000000..a2fe4cfe0c
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/SearchExtParser.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Defines a parser that is able to parse {@link org.elasticsearch.search.SearchExtBuilder}s
+ * from {@link org.elasticsearch.common.xcontent.XContent}.
+ *
+ * Registration happens through {@link org.elasticsearch.plugins.SearchPlugin#getSearchExts()}, which also needs a {@link SearchExtBuilder}
+ * implementation, which is the object that this parser returns when reading an incoming request from the REST layer.
+ *
+ * @see SearchExtBuilder
+ * @see org.elasticsearch.plugins.SearchPlugin.SearchExtSpec
+ */
+@FunctionalInterface
+public interface SearchExtParser<T extends SearchExtBuilder> {
+
+ /**
+ * Parses the supported element placed within the ext section of a search request
+ */
+ T fromXContent(XContentParser parser) throws IOException;
+}
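Because the interface is a @FunctionalInterface with a single fromXContent method, a plugin can supply its parser as a lambda or method reference when building a SearchExtSpec. A sketch with the parser type reduced to a local stand-in (the real method takes an XContentParser, not a String):

public class ParserDemo {
    @FunctionalInterface
    interface Parser<T> { T fromXContent(String json); } // stand-in for SearchExtParser

    static final class MyExt {
        final String payload;
        MyExt(String payload) { this.payload = payload; }
        static MyExt parse(String json) { return new MyExt(json.trim()); }
    }

    public static void main(String[] args) {
        Parser<MyExt> parser = MyExt::parse; // handed to SearchExtSpec in a real plugin
        System.out.println(parser.fromXContent(" {\"greeting\":\"hi\"} ").payload);
    }
}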
diff --git a/core/src/main/java/org/elasticsearch/search/SearchParseElement.java b/core/src/main/java/org/elasticsearch/search/SearchExtRegistry.java
index 9bf680deb5..dd04145ba7 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchParseElement.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchExtRegistry.java
@@ -19,13 +19,14 @@
package org.elasticsearch.search;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.common.xcontent.ParseFieldRegistry;
/**
- *
+ * Extensions to ParseFieldRegistry to make Guice happy.
*/
-public interface SearchParseElement {
+public class SearchExtRegistry extends ParseFieldRegistry<SearchExtParser> {
- void parse(XContentParser parser, SearchContext context) throws Exception;
+ public SearchExtRegistry() {
+ super("ext");
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java
index ab36f03639..99b3d4c889 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -23,7 +23,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.elasticsearch.common.NamedRegistry;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
-import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
@@ -93,8 +92,8 @@ import org.elasticsearch.plugins.SearchPlugin.FetchPhaseConstructionContext;
import org.elasticsearch.plugins.SearchPlugin.PipelineAggregationSpec;
import org.elasticsearch.plugins.SearchPlugin.QuerySpec;
import org.elasticsearch.plugins.SearchPlugin.ScoreFunctionSpec;
+import org.elasticsearch.plugins.SearchPlugin.SearchExtSpec;
import org.elasticsearch.plugins.SearchPlugin.SearchExtensionSpec;
-import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorParsers;
@@ -242,7 +241,6 @@ import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator;
-import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
@@ -287,7 +285,7 @@ import static java.util.Objects.requireNonNull;
/**
* Sets up things that can be done at search time like queries, aggregations, and suggesters.
*/
-public class SearchModule extends AbstractModule {
+public class SearchModule {
public static final Setting<Integer> INDICES_MAX_CLAUSE_COUNT_SETTING = Setting.intSetting("indices.query.bool.max_clause_count",
1024, 1, Integer.MAX_VALUE, Setting.Property.NodeScope);
@@ -306,6 +304,7 @@ public class SearchModule extends AbstractModule {
"moving_avg_model");
private final List<FetchSubPhase> fetchSubPhases = new ArrayList<>();
+ private final SearchExtRegistry searchExtParserRegistry = new SearchExtRegistry();
private final Settings settings;
private final List<Entry> namedWriteables = new ArrayList<>();
@@ -326,8 +325,9 @@ public class SearchModule extends AbstractModule {
registerAggregations(plugins);
registerPipelineAggregations(plugins);
registerFetchSubPhases(plugins);
+ registerSearchExts(plugins);
registerShapes();
- searchRequestParsers = new SearchRequestParsers(queryParserRegistry, aggregatorParsers, getSuggesters());
+ searchRequestParsers = new SearchRequestParsers(queryParserRegistry, aggregatorParsers, getSuggesters(), searchExtParserRegistry);
}
public List<Entry> getNamedWriteables() {
@@ -374,16 +374,6 @@ public class SearchModule extends AbstractModule {
return aggregatorParsers;
}
-
- @Override
- protected void configure() {
- if (false == transportClient) {
- bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry);
- bind(SearchRequestParsers.class).toInstance(searchRequestParsers);
- configureSearch();
- }
- }
-
private void registerAggregations(List<SearchPlugin> plugins) {
registerAggregation(new AggregationSpec(AvgAggregationBuilder.NAME, AvgAggregationBuilder::new, new AvgParser())
.addResultReader(InternalAvg::new));
@@ -570,13 +560,6 @@ public class SearchModule extends AbstractModule {
}
}
- protected void configureSearch() {
- // configure search private classes...
- bind(SearchPhaseController.class).asEagerSingleton();
- bind(FetchPhase.class).toInstance(new FetchPhase(fetchSubPhases));
- bind(SearchTransportService.class).asEagerSingleton();
- }
-
private void registerShapes() {
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
ShapeBuilders.register(namedWriteables);
@@ -725,6 +708,15 @@ public class SearchModule extends AbstractModule {
registerFromPlugin(plugins, p -> p.getFetchSubPhases(context), this::registerFetchSubPhase);
}
+ private void registerSearchExts(List<SearchPlugin> plugins) {
+ registerFromPlugin(plugins, SearchPlugin::getSearchExts, this::registerSearchExt);
+ }
+
+ private void registerSearchExt(SearchExtSpec<?> spec) {
+ searchExtParserRegistry.register(spec.getParser(), spec.getName());
+ namedWriteables.add(new Entry(SearchExtBuilder.class, spec.getName().getPreferredName(), spec.getReader()));
+ }
+
private void registerFetchSubPhase(FetchSubPhase subPhase) {
Class<?> subPhaseClass = subPhase.getClass();
if (fetchSubPhases.stream().anyMatch(p -> p.getClass().equals(subPhaseClass))) {
@@ -804,4 +796,12 @@ public class SearchModule extends AbstractModule {
queryParserRegistry.register(spec.getParser(), spec.getName());
namedWriteables.add(new Entry(QueryBuilder.class, spec.getName().getPreferredName(), spec.getReader()));
}
+
+ public FetchPhase getFetchPhase() {
+ return new FetchPhase(fetchSubPhases);
+ }
+
+ public SearchExtRegistry getSearchExtRegistry() {
+ return searchExtParserRegistry;
+ }
}
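For plugin authors, the registration path above (registerSearchExts plus the new getSearchExtRegistry()/getFetchPhase() accessors) replaces the Guice bindings that were removed. A minimal sketch of the plugin side, assuming a SearchExtSpec constructor of (name, reader, parser) as implied by the getName()/getReader()/getParser() accessors used in registerSearchExt; MyExtBuilder and MyExtParser are hypothetical names:

    import java.util.Collections;
    import java.util.List;

    import org.elasticsearch.plugins.Plugin;
    import org.elasticsearch.plugins.SearchPlugin;
    import org.elasticsearch.plugins.SearchPlugin.SearchExtSpec;

    public class MyExtPlugin extends Plugin implements SearchPlugin {
        @Override
        public List<SearchExtSpec<?>> getSearchExts() {
            // "my_ext" is the key that appears under the request's ext element;
            // MyExtBuilder::new deserializes the builder from the transport stream,
            // and MyExtParser parses the section out of the request body.
            SearchExtSpec<?> spec = new SearchExtSpec<>("my_ext", MyExtBuilder::new, new MyExtParser());
            return Collections.singletonList(spec);
        }
    }

SearchModule then wires the parser into the SearchExtRegistry and the reader into the named writeables, so the section round-trips both the REST layer and transport serialization.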
diff --git a/core/src/main/java/org/elasticsearch/search/SearchPhase.java b/core/src/main/java/org/elasticsearch/search/SearchPhase.java
index 48c041f12f..33260706b3 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchPhase.java
@@ -21,22 +21,18 @@ package org.elasticsearch.search;
import org.elasticsearch.search.internal.SearchContext;
-import java.util.Collections;
-import java.util.Map;
-
/**
- *
+ * Represents a phase of a search request, e.g. query or fetch.
*/
public interface SearchPhase {
- default Map<String, ? extends SearchParseElement> parseElements() {
- return Collections.emptyMap();
- }
-
/**
* Performs pre processing of the search context before the execute.
*/
void preProcess(SearchContext context);
+ /**
+ * Executes the search phase.
+ */
void execute(SearchContext context);
}
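With parseElements() gone, implementing a SearchPhase is down to the two lifecycle methods. A hypothetical no-op implementation, purely to illustrate the slimmed-down contract:

    import org.elasticsearch.search.SearchPhase;
    import org.elasticsearch.search.internal.SearchContext;

    // Hypothetical phase, for illustration only.
    public class NoopSearchPhase implements SearchPhase {
        @Override
        public void preProcess(SearchContext context) {
            // prepare per-request state on the context before execution
        }

        @Override
        public void execute(SearchContext context) {
            // perform the per-shard work for this phase
        }
    }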
diff --git a/core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java b/core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java
index 83eebd125d..ba3afc1577 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java
@@ -37,7 +37,8 @@ public class SearchRequestParsers {
/**
* Query parsers that may be used in search requests.
* @see org.elasticsearch.index.query.QueryParseContext
- * @see org.elasticsearch.search.builder.SearchSourceBuilder#fromXContent(QueryParseContext, AggregatorParsers, Suggesters)
+ * @see org.elasticsearch.search.builder.SearchSourceBuilder#fromXContent(QueryParseContext, AggregatorParsers,
+ * Suggesters, SearchExtRegistry)
*/
public final IndicesQueriesRegistry queryParsers;
@@ -45,20 +46,29 @@ public class SearchRequestParsers {
// and pipeline agg parsers should be here
/**
* Agg and pipeline agg parsers that may be used in search requests.
- * @see org.elasticsearch.search.builder.SearchSourceBuilder#fromXContent(QueryParseContext, AggregatorParsers, Suggesters)
+ * @see org.elasticsearch.search.builder.SearchSourceBuilder#fromXContent(QueryParseContext, AggregatorParsers,
+ * Suggesters, SearchExtRegistry)
*/
public final AggregatorParsers aggParsers;
// TODO: Suggesters should be removed and the underlying map moved here
/**
* Suggesters that may be used in search requests.
- * @see org.elasticsearch.search.builder.SearchSourceBuilder#fromXContent(QueryParseContext, AggregatorParsers, Suggesters)
+ * @see org.elasticsearch.search.builder.SearchSourceBuilder#fromXContent(QueryParseContext, AggregatorParsers,
+ * Suggesters, SearchExtRegistry)
*/
public final Suggesters suggesters;
- public SearchRequestParsers(IndicesQueriesRegistry queryParsers, AggregatorParsers aggParsers, Suggesters suggesters) {
+ /**
+ * Parsers for pluggable sections that can appear in the ext element of a search request
+ */
+ public final SearchExtRegistry searchExtParsers;
+
+ public SearchRequestParsers(IndicesQueriesRegistry queryParsers, AggregatorParsers aggParsers, Suggesters suggesters,
+ SearchExtRegistry searchExtParsers) {
this.queryParsers = queryParsers;
this.aggParsers = aggParsers;
this.suggesters = suggesters;
+ this.searchExtParsers = searchExtParsers;
}
}
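The only core call site is the one in SearchModule shown earlier; a condensed, illustrative sketch of the new four-argument construction (import paths as in core):

    import org.elasticsearch.indices.query.IndicesQueriesRegistry;
    import org.elasticsearch.search.SearchExtRegistry;
    import org.elasticsearch.search.SearchRequestParsers;
    import org.elasticsearch.search.aggregations.AggregatorParsers;
    import org.elasticsearch.search.suggest.Suggesters;

    // Illustrative helper mirroring the SearchModule call site.
    final class SearchRequestParsersFactory {
        static SearchRequestParsers create(IndicesQueriesRegistry queryParsers,
                                           AggregatorParsers aggParsers,
                                           Suggesters suggesters,
                                           SearchExtRegistry searchExtParsers) {
            return new SearchRequestParsers(queryParsers, aggParsers, suggesters, searchExtParsers);
        }
    }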
diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java
index 5fd0a1417b..4334c5cf54 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchService.java
@@ -21,7 +21,6 @@ package org.elasticsearch.search;
import com.carrotsearch.hppc.ObjectFloatHashMap;
import org.apache.lucene.search.FieldDoc;
-import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@@ -30,9 +29,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -40,9 +37,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentLocation;
-import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
@@ -68,11 +62,8 @@ import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
-import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
-import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.DocValueField;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
-import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
@@ -103,13 +94,9 @@ import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;
-import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
-/**
- *
- */
public class SearchService extends AbstractLifecycleComponent implements IndexEventListener {
// we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
@@ -149,14 +136,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
private final ConcurrentMapLong<SearchContext> activeContexts = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
- private final Map<String, SearchParseElement> elementParsers;
-
private final ParseFieldMatcher parseFieldMatcher;
- @Inject
- public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService,
+ public SearchService(ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase) {
- super(settings);
+ super(clusterService.getSettings());
this.parseFieldMatcher = new ParseFieldMatcher(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
@@ -169,16 +153,10 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings);
this.defaultKeepAlive = DEFAULT_KEEPALIVE_SETTING.get(settings).millis();
- Map<String, SearchParseElement> elementParsers = new HashMap<>();
- elementParsers.putAll(dfsPhase.parseElements());
- elementParsers.putAll(queryPhase.parseElements());
- elementParsers.putAll(fetchPhase.parseElements());
- this.elementParsers = unmodifiableMap(elementParsers);
-
this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, Names.SAME);
defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
- clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
+ clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
}
private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) {
@@ -233,6 +211,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws IOException {
final SearchContext context = createAndPutContext(request);
+ context.incRef();
try {
contextProcessing(context);
dfsPhase.execute(context);
@@ -262,6 +241,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) throws IOException {
final SearchContext context = createAndPutContext(request);
final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
+ context.incRef();
try {
operationListener.onPreQueryPhase(context);
long time = System.nanoTime();
@@ -295,6 +275,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) {
final SearchContext context = findContext(request.id());
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
+ context.incRef();
try {
operationListener.onPreQueryPhase(context);
long time = System.nanoTime();
@@ -316,11 +297,13 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public QuerySearchResult executeQueryPhase(QuerySearchRequest request) {
final SearchContext context = findContext(request.id());
- contextProcessing(context);
- context.searcher().setAggregatedDfs(request.dfs());
IndexShard indexShard = context.indexShard();
SearchOperationListener operationListener = indexShard.getSearchOperationListener();
+ context.incRef();
try {
+ contextProcessing(context);
+ context.searcher().setAggregatedDfs(request.dfs());
+
operationListener.onPreQueryPhase(context);
long time = System.nanoTime();
queryPhase.execute(context);
@@ -354,8 +337,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws IOException {
final SearchContext context = createAndPutContext(request);
- contextProcessing(context);
+ context.incRef();
try {
+ contextProcessing(context);
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
operationListener.onPreQueryPhase(context);
long time = System.nanoTime();
@@ -393,9 +377,10 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) {
final SearchContext context = findContext(request.id());
- contextProcessing(context);
- context.searcher().setAggregatedDfs(request.dfs());
+ context.incRef();
try {
+ contextProcessing(context);
+ context.searcher().setAggregatedDfs(request.dfs());
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
operationListener.onPreQueryPhase(context);
long time = System.nanoTime();
@@ -433,8 +418,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) {
final SearchContext context = findContext(request.id());
- contextProcessing(context);
+ context.incRef();
try {
+ contextProcessing(context);
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
processScroll(request, context);
operationListener.onPreQueryPhase(context);
@@ -461,7 +447,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
throw ExceptionsHelper.convertToRuntime(e);
}
operationListener.onFetchPhase(context, System.nanoTime() - time2);
- return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), context.shardTarget());
+ return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
+ context.shardTarget());
} catch (Exception e) {
logger.trace("Fetch phase failed", e);
processFailure(context, e);
@@ -473,9 +460,10 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public FetchSearchResult executeFetchPhase(ShardFetchRequest request) {
final SearchContext context = findContext(request.id());
- contextProcessing(context);
final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
+ context.incRef();
try {
+ contextProcessing(context);
if (request.lastEmittedDoc() != null) {
context.scrollContext().lastEmittedDoc = request.lastEmittedDoc();
}
@@ -528,16 +516,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
}
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException {
- IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
- IndexShard indexShard = indexService.getShard(request.shardId().getId());
- SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
- Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
-
- DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher,
- indexService,
- indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
- defaultSearchTimeout, fetchPhase);
+ DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher);
SearchContext.setCurrent(context);
try {
request.rewrite(context.getQueryShardContext());
@@ -580,6 +560,18 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
return context;
}
+ public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) {
+ IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
+ IndexShard indexShard = indexService.getShard(request.shardId().getId());
+ SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
+ Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
+
+ return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher,
+ indexService,
+ indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
+ timeout, fetchPhase);
+ }
+
private void freeAllContextForIndex(Index index) {
assert index != null;
for (SearchContext ctx : activeContexts.values()) {
@@ -593,6 +585,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public boolean freeContext(long id) {
final SearchContext context = removeContext(id);
if (context != null) {
+ assert context.refCount() > 0 : "refCount must be > 0: " + context.refCount();
try {
context.indexShard().getSearchOperationListener().onFreeContext(context);
if (context.scrollContext() != null) {
@@ -624,9 +617,13 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
}
private void cleanContext(SearchContext context) {
- assert context == SearchContext.current();
- context.clearReleasables(Lifetime.PHASE);
- SearchContext.removeCurrent();
+ try {
+ assert context == SearchContext.current();
+ context.clearReleasables(Lifetime.PHASE);
+ SearchContext.removeCurrent();
+ } finally {
+ context.decRef();
+ }
}
private void processFailure(SearchContext context, Exception e) {
@@ -719,9 +716,6 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
throw new SearchContextException(context, "failed to create RescoreSearchContext", e);
}
}
- if (source.storedFields() != null) {
- context.fieldNames().addAll(source.storedFields());
- }
if (source.explain() != null) {
context.explain(source.explain());
}
@@ -729,11 +723,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
context.fetchSourceContext(source.fetchSource());
}
if (source.docValueFields() != null) {
- DocValueFieldsContext docValuesFieldsContext = context.getFetchSubPhaseContext(DocValueFieldsFetchSubPhase.CONTEXT_FACTORY);
- for (String field : source.docValueFields()) {
- docValuesFieldsContext.add(new DocValueField(field));
- }
- docValuesFieldsContext.setHitExecutionNeeded(true);
+ context.docValueFieldsContext(new DocValueFieldsContext(source.docValueFields()));
}
if (source.highlighter() != null) {
HighlightBuilder highlightBuilder = source.highlighter();
@@ -751,43 +741,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
}
}
if (source.ext() != null) {
- XContentParser extParser = null;
- try {
- extParser = XContentFactory.xContent(source.ext()).createParser(source.ext());
- XContentParser.Token token = extParser.nextToken();
- String currentFieldName = null;
- while ((token = extParser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- currentFieldName = extParser.currentName();
- } else {
- SearchParseElement parseElement = this.elementParsers.get(currentFieldName);
- if (parseElement == null) {
- if (currentFieldName != null && currentFieldName.equals("suggest")) {
- throw new SearchParseException(context,
- "suggest is not supported in [ext], please use SearchSourceBuilder#suggest(SuggestBuilder) instead",
- extParser.getTokenLocation());
- }
- throw new SearchParseException(context, "Unknown element [" + currentFieldName + "] in [ext]",
- extParser.getTokenLocation());
- } else {
- parseElement.parse(extParser, context);
- }
- }
- }
- } catch (Exception e) {
- String sSource = "_na_";
- try {
- sSource = source.toString();
- } catch (Exception inner) {
- e.addSuppressed(inner);
- // ignore
- }
- XContentLocation location = extParser != null ? extParser.getTokenLocation() : null;
- throw new SearchParseException(context, "failed to parse ext source [" + sSource + "]", location, e);
- } finally {
- if (extParser != null) {
- extParser.close();
- }
+ for (SearchExtBuilder searchExtBuilder : source.ext()) {
+ context.addSearchExt(searchExtBuilder);
}
}
if (source.version() != null) {
@@ -813,6 +768,18 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
}
context.sliceBuilder(source.slice());
}
+
+ if (source.storedFields() != null) {
+ if (source.storedFields().fetchFields() == false) {
+ if (context.version()) {
+ throw new SearchContextException(context, "`stored_fields` cannot be disabled if version is requested");
+ }
+ if (context.sourceRequested()) {
+ throw new SearchContextException(context, "`stored_fields` cannot be disabled if _source is requested");
+ }
+ }
+ context.storedFieldsContext(source.storedFields());
+ }
}
/**
@@ -891,7 +858,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
continue;
}
if ((time - lastAccessTime > context.keepAlive())) {
- logger.debug("freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]", context.id(), time, lastAccessTime, context.keepAlive());
+ logger.debug("freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]", context.id(), time,
+ lastAccessTime, context.keepAlive());
freeContext(context.id());
}
}
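The recurring edit across the execute*Phase methods above is a reference-counting discipline: pin the context with incRef() before any processing, move contextProcessing() inside the try so failures are still cleaned up, and release in cleanContext(), which now decRef()s in a finally. A condensed sketch of the pattern, not a literal SearchService method; it assumes the existing finally { cleanContext(context); } that surrounds each phase body:

    // Schematic of the pinning pattern applied to every phase executor.
    public QuerySearchResult executeSomePhase(QuerySearchRequest request) {
        final SearchContext context = findContext(request.id());
        context.incRef();                // pin: a concurrent freeContext() cannot release it mid-phase
        try {
            contextProcessing(context);  // inside try, so a failure still reaches cleanContext
            // ... run the phase against `context` ...
            return context.queryResult();
        } finally {
            cleanContext(context);       // clearReleasables + removeCurrent, then decRef()
        }
    }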
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
index 5dc2937431..8acd4f1375 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
@@ -40,7 +40,7 @@ import java.util.Collections;
import java.util.List;
/**
- *
+ * Aggregation phase of a search request, used to collect aggregations.
*/
public class AggregationPhase implements SearchPhase {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java
index b7cab31974..7c6ebae740 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java
@@ -25,11 +25,9 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PackedLongValues;
-import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.aggregations.Aggregator;
-import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
@@ -119,6 +117,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector {
@Override
public void preCollection() throws IOException {
+ collector.preCollection();
}
@Override
@@ -145,7 +144,6 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector {
}
this.selectedBuckets = hash;
- collector.preCollection();
boolean needsScores = collector.needsScores();
Weight weight = null;
if (needsScores) {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java
index 2336923467..90316c1a00 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java
@@ -48,7 +48,7 @@ import java.util.List;
* {@link BestDocsDeferringCollector#createTopDocsCollector(int)} is designed to
* be overridden and allows subclasses to choose a custom collector
* implementation for determining the top N matches.
- *
+ *
*/
public class BestDocsDeferringCollector extends DeferringBucketCollector implements Releasable {
@@ -61,7 +61,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
/**
* Sole constructor.
- *
+ *
* @param shardSize
* The number of top-scoring docs to collect for each bucket
*/
@@ -111,6 +111,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
@Override
public void preCollection() throws IOException {
+ deferred.preCollection();
}
@Override
@@ -125,7 +126,6 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
}
private void runDeferredAggs() throws IOException {
- deferred.preCollection();
List<ScoreDoc> allDocs = new ArrayList<>(shardSize);
for (int i = 0; i < perBucketSamples.size(); i++) {
@@ -135,14 +135,14 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
}
perBucketSample.getMatches(allDocs);
}
-
+
// Sort the top matches by docID for the benefit of deferred collector
ScoreDoc[] docsArr = allDocs.toArray(new ScoreDoc[allDocs.size()]);
Arrays.sort(docsArr, new Comparator<ScoreDoc>() {
@Override
public int compare(ScoreDoc o1, ScoreDoc o2) {
if(o1.doc == o2.doc){
- return o1.shardIndex - o2.shardIndex;
+ return o1.shardIndex - o2.shardIndex;
}
return o1.doc - o2.doc;
}
@@ -256,7 +256,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
currentScore = scoreDoc.score;
currentDocId = rebased;
// We stored the bucket ID in Lucene's shardIndex property
- // for convenience.
+ // for convenience.
leafCollector.collect(rebased, scoreDoc.shardIndex);
}
}
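Both deferring collectors get the same fix: the wrapped collector's preCollection() used to be delayed until the deferred aggs were replayed, which diverges from the BucketCollector lifecycle, where preCollection runs once up front, before any collect call. Schematically, the order the change restores (illustrative only):

    import java.io.IOException;

    import org.elasticsearch.search.aggregations.BucketCollector;

    // Illustrative only: the lifecycle order the fix restores.
    void lifecycle(BucketCollector wrapped) throws IOException {
        wrapped.preCollection();   // now forwarded from the deferring collector's own preCollection()
        // ... buffer matching docs, later replay them into `wrapped` ...
        wrapped.postCollection();  // unchanged: after replay completes
    }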
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
index 1ae31e09ba..e669ee8b9d 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
@@ -19,11 +19,11 @@
package org.elasticsearch.search.aggregations.bucket.geogrid;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.query.GeoBoundingBoxQueryBuilder;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -65,16 +65,17 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
if (token == XContentParser.Token.VALUE_NUMBER || token == XContentParser.Token.VALUE_STRING) {
- if (parseFieldMatcher.match(currentFieldName, GeoHashGridParams.FIELD_PRECISION)) {
+ if (context.matchField(currentFieldName, GeoHashGridParams.FIELD_PRECISION)) {
otherOptions.put(GeoHashGridParams.FIELD_PRECISION, parser.intValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, GeoHashGridParams.FIELD_SIZE)) {
+ } else if (context.matchField(currentFieldName, GeoHashGridParams.FIELD_SIZE)) {
otherOptions.put(GeoHashGridParams.FIELD_SIZE, parser.intValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, GeoHashGridParams.FIELD_SHARD_SIZE)) {
+ } else if (context.matchField(currentFieldName, GeoHashGridParams.FIELD_SHARD_SIZE)) {
otherOptions.put(GeoHashGridParams.FIELD_SHARD_SIZE, parser.intValue());
return true;
}
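This is the first of a series of mechanical rewrites: every values-source parser's token(...) callback now takes an XContentParseContext bundling the parser and the ParseFieldMatcher, with context.matchField(...) replacing the direct matcher calls. A sketch of an override under the new signature; MyParser's surrounding class and the other abstract methods of AnyValuesSourceParser are omitted, and MY_FIELD is hypothetical:

    import java.io.IOException;
    import java.util.Map;

    import org.elasticsearch.common.ParseField;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.search.aggregations.support.XContentParseContext;

    // Hypothetical override showing the new callback shape.
    @Override
    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
                            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
        XContentParser parser = context.getParser();
        if (token == XContentParser.Token.VALUE_NUMBER
                && context.matchField(currentFieldName, MY_FIELD)) {  // was parseFieldMatcher.match(...)
            otherOptions.put(MY_FIELD, parser.intValue());
            return true;
        }
        return false;
    }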
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
index e3a3ea7576..952a0e2568 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
@@ -19,11 +19,11 @@
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -79,10 +79,11 @@ public class DateHistogramParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
if (token.isValue()) {
- if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {
+ if (context.matchField(currentFieldName, Histogram.INTERVAL_FIELD)) {
if (token == XContentParser.Token.VALUE_STRING) {
otherOptions.put(Histogram.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));
return true;
@@ -90,13 +91,13 @@ public class DateHistogramParser extends NumericValuesSourceParser {
otherOptions.put(Histogram.INTERVAL_FIELD, parser.longValue());
return true;
}
- } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
+ } else if (context.matchField(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
+ } else if (context.matchField(currentFieldName, Histogram.KEYED_FIELD)) {
otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {
+ } else if (context.matchField(currentFieldName, Histogram.OFFSET_FIELD)) {
if (token == XContentParser.Token.VALUE_STRING) {
otherOptions.put(Histogram.OFFSET_FIELD,
DateHistogramAggregationBuilder.parseStringOffset(parser.text()));
@@ -109,7 +110,7 @@ public class DateHistogramParser extends NumericValuesSourceParser {
return false;
}
} else if (token == XContentParser.Token.START_OBJECT) {
- if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {
+ if (context.matchField(currentFieldName, Histogram.ORDER_FIELD)) {
InternalOrder order = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@@ -127,9 +128,10 @@ public class DateHistogramParser extends NumericValuesSourceParser {
}
otherOptions.put(Histogram.ORDER_FIELD, order);
return true;
- } else if (parseFieldMatcher.match(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {
+ } else if (context.matchField(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {
try {
- otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD, ExtendedBounds.PARSER.apply(parser, () -> parseFieldMatcher));
+ otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD,
+ ExtendedBounds.PARSER.apply(parser, context::getParseFieldMatcher));
} catch (Exception e) {
throw new ParsingException(parser.getTokenLocation(), "Error parsing [{}]", e, aggregationName);
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
index 69aed3e733..f27677a1a6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
@@ -19,13 +19,13 @@
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -85,26 +85,27 @@ public class HistogramParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
if (token.isValue()) {
- if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {
+ if (context.matchField(currentFieldName, Histogram.INTERVAL_FIELD)) {
otherOptions.put(Histogram.INTERVAL_FIELD, parser.doubleValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
+ } else if (context.matchField(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
+ } else if (context.matchField(currentFieldName, Histogram.KEYED_FIELD)) {
otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {
+ } else if (context.matchField(currentFieldName, Histogram.OFFSET_FIELD)) {
otherOptions.put(Histogram.OFFSET_FIELD, parser.doubleValue());
return true;
} else {
return false;
}
} else if (token == XContentParser.Token.START_OBJECT) {
- if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {
+ if (context.matchField(currentFieldName, Histogram.ORDER_FIELD)) {
InternalOrder order = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@@ -122,8 +123,8 @@ public class HistogramParser extends NumericValuesSourceParser {
}
otherOptions.put(Histogram.ORDER_FIELD, order);
return true;
- } else if (parseFieldMatcher.match(currentFieldName, Histogram.EXTENDED_BOUNDS_FIELD)) {
- double[] bounds = EXTENDED_BOUNDS_PARSER.apply(parser, () -> parseFieldMatcher);
+ } else if (context.matchField(currentFieldName, Histogram.EXTENDED_BOUNDS_FIELD)) {
+ double[] bounds = EXTENDED_BOUNDS_PARSER.apply(parser, context::getParseFieldMatcher);
otherOptions.put(Histogram.EXTENDED_BOUNDS_FIELD, bounds);
return true;
} else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
index fff81db130..5d6844ebbd 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.aggregations.bucket.missing;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -35,8 +35,8 @@ public class MissingParser extends AnyValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
index 6ebd413d2d..c8cb2c7671 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
@@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -65,20 +66,21 @@ public class RangeParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
if (token == XContentParser.Token.START_ARRAY) {
- if (parseFieldMatcher.match(currentFieldName, RangeAggregator.RANGES_FIELD)) {
+ if (context.matchField(currentFieldName, RangeAggregator.RANGES_FIELD)) {
List<Range> ranges = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- Range range = parseRange(parser, parseFieldMatcher);
+ Range range = parseRange(parser, context.getParseFieldMatcher());
ranges.add(range);
}
otherOptions.put(RangeAggregator.RANGES_FIELD, ranges);
return true;
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
- if (parseFieldMatcher.match(currentFieldName, RangeAggregator.KEYED_FIELD)) {
+ if (context.matchField(currentFieldName, RangeAggregator.KEYED_FIELD)) {
boolean keyed = parser.booleanValue();
otherOptions.put(RangeAggregator.KEYED_FIELD, keyed);
return true;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java
index 3d4c02bc6f..f0dfec2312 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java
@@ -64,6 +64,14 @@ public class InternalDateRange extends InternalRange<InternalDateRange.Bucket, I
return Double.isInfinite(((Number) to).doubleValue()) ? null : new DateTime(((Number) to).longValue(), DateTimeZone.UTC);
}
+ private Double internalGetFrom() {
+ return from;
+ }
+
+ private Double internalGetTo() {
+ return to;
+ }
+
@Override
protected InternalRange.Factory<Bucket, ?> getFactory() {
return FACTORY;
@@ -109,8 +117,8 @@ public class InternalDateRange extends InternalRange<InternalDateRange.Bucket, I
@Override
public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
- return new Bucket(prototype.getKey(), ((Number) prototype.getFrom()).doubleValue(), ((Number) prototype.getTo()).doubleValue(),
- prototype.getDocCount(), aggregations, prototype.getKeyed(), prototype.getFormat());
+ return new Bucket(prototype.getKey(), prototype.internalGetFrom(), prototype.internalGetTo(),
+ prototype.getDocCount(), aggregations, prototype.getKeyed(), prototype.getFormat());
}
}
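The raw internalGetFrom()/internalGetTo() accessors exist because Bucket overrides getFrom()/getTo() to return a DateTime, or null for an unbounded edge (see the Double.isInfinite check above), so the old (Number) casts in createBucket could not succeed when copying a date-range bucket. Illustratively:

    // What the old createBucket effectively did for a date-range prototype:
    Object from = prototype.getFrom();         // DateTime, or null for an open-ended range
    double d = ((Number) from).doubleValue();  // ClassCastException (or NPE when null)
    // The new code copies the underlying doubles directly via internalGetFrom()/internalGetTo().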
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
index 16cb909ea0..677731d64e 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.bucket.range.geodistance;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
import org.elasticsearch.search.aggregations.support.GeoPointParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -110,28 +110,29 @@ public class GeoDistanceParser extends GeoPointValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
- if (geoPointParser.token(aggregationName, currentFieldName, token, parser, parseFieldMatcher, otherOptions)) {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
+ if (geoPointParser.token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
return true;
} else if (token == XContentParser.Token.VALUE_STRING) {
- if (parseFieldMatcher.match(currentFieldName, UNIT_FIELD)) {
+ if (context.matchField(currentFieldName, UNIT_FIELD)) {
DistanceUnit unit = DistanceUnit.fromString(parser.text());
otherOptions.put(UNIT_FIELD, unit);
return true;
- } else if (parseFieldMatcher.match(currentFieldName, DISTANCE_TYPE_FIELD)) {
+ } else if (context.matchField(currentFieldName, DISTANCE_TYPE_FIELD)) {
GeoDistance distanceType = GeoDistance.fromString(parser.text());
otherOptions.put(DISTANCE_TYPE_FIELD, distanceType);
return true;
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
- if (parseFieldMatcher.match(currentFieldName, RangeAggregator.KEYED_FIELD)) {
+ if (context.matchField(currentFieldName, RangeAggregator.KEYED_FIELD)) {
boolean keyed = parser.booleanValue();
otherOptions.put(RangeAggregator.KEYED_FIELD, keyed);
return true;
}
} else if (token == XContentParser.Token.START_ARRAY) {
- if (parseFieldMatcher.match(currentFieldName, RangeAggregator.RANGES_FIELD)) {
+ if (context.matchField(currentFieldName, RangeAggregator.RANGES_FIELD)) {
List<Range> ranges = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String fromAsStr = null;
@@ -144,17 +145,17 @@ public class GeoDistanceParser extends GeoPointValuesSourceParser {
if (token == XContentParser.Token.FIELD_NAME) {
toOrFromOrKey = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
- if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) {
+ if (context.matchField(toOrFromOrKey, Range.FROM_FIELD)) {
from = parser.doubleValue();
- } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) {
+ } else if (context.matchField(toOrFromOrKey, Range.TO_FIELD)) {
to = parser.doubleValue();
}
} else if (token == XContentParser.Token.VALUE_STRING) {
- if (parseFieldMatcher.match(toOrFromOrKey, Range.KEY_FIELD)) {
+ if (context.matchField(toOrFromOrKey, Range.KEY_FIELD)) {
key = parser.text();
- } else if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) {
+ } else if (context.matchField(toOrFromOrKey, Range.FROM_FIELD)) {
fromAsStr = parser.text();
- } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) {
+ } else if (context.matchField(toOrFromOrKey, Range.TO_FIELD)) {
toAsStr = parser.text();
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
index 8445fb2d45..5d95f0dd49 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.BytesValuesSourceParser;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder.Range;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
@@ -102,21 +103,22 @@ public class IpRangeParser extends BytesValuesSourceParser {
@Override
protected boolean token(String aggregationName, String currentFieldName,
- Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher,
- Map<ParseField, Object> otherOptions) throws IOException {
- if (parseFieldMatcher.match(currentFieldName, RangeAggregator.RANGES_FIELD)) {
+ Token token,
+ XContentParseContext context,
+ Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
+ if (context.matchField(currentFieldName, RangeAggregator.RANGES_FIELD)) {
if (parser.currentToken() != Token.START_ARRAY) {
throw new ParsingException(parser.getTokenLocation(), "[ranges] must be passed as an array, but got a " + token);
}
List<Range> ranges = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- Range range = parseRange(parser, parseFieldMatcher);
+ Range range = parseRange(parser, context.getParseFieldMatcher());
ranges.add(range);
}
otherOptions.put(RangeAggregator.RANGES_FIELD, ranges);
return true;
- } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.KEYED_FIELD)) {
+ } else if (context.matchField(parser.currentName(), RangeAggregator.KEYED_FIELD)) {
otherOptions.put(RangeAggregator.KEYED_FIELD, parser.booleanValue());
return true;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
index f495071f6d..a62035d723 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
@@ -20,9 +20,9 @@ package org.elasticsearch.search.aggregations.bucket.sampler;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -57,20 +57,21 @@ public class DiversifiedSamplerParser extends AnyValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
if (token == XContentParser.Token.VALUE_NUMBER) {
- if (parseFieldMatcher.match(currentFieldName, SamplerAggregator.SHARD_SIZE_FIELD)) {
+ if (context.matchField(currentFieldName, SamplerAggregator.SHARD_SIZE_FIELD)) {
int shardSize = parser.intValue();
otherOptions.put(SamplerAggregator.SHARD_SIZE_FIELD, shardSize);
return true;
- } else if (parseFieldMatcher.match(currentFieldName, SamplerAggregator.MAX_DOCS_PER_VALUE_FIELD)) {
+ } else if (context.matchField(currentFieldName, SamplerAggregator.MAX_DOCS_PER_VALUE_FIELD)) {
int maxDocsPerValue = parser.intValue();
otherOptions.put(SamplerAggregator.MAX_DOCS_PER_VALUE_FIELD, maxDocsPerValue);
return true;
}
} else if (token == XContentParser.Token.VALUE_STRING) {
- if (parseFieldMatcher.match(currentFieldName, SamplerAggregator.EXECUTION_HINT_FIELD)) {
+ if (context.matchField(currentFieldName, SamplerAggregator.EXECUTION_HINT_FIELD)) {
String executionHint = parser.text();
otherOptions.put(SamplerAggregator.EXECUTION_HINT_FIELD, executionHint);
return true;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
index ba87f0917a..0f08cf0a0a 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.bucket.significant;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.ParseFieldRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -33,6 +32,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.AbstractTermsParser;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -81,17 +81,18 @@ public class SignificantTermsParser extends AbstractTermsParser {
}
@Override
- public boolean parseSpecial(String aggregationName, XContentParser parser, ParseFieldMatcher parseFieldMatcher, Token token,
- String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
+ public boolean parseSpecial(String aggregationName, XContentParseContext context, Token token,
+ String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
if (token == XContentParser.Token.START_OBJECT) {
SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserRegistry
- .lookupReturningNullIfNotFound(currentFieldName, parseFieldMatcher);
+ .lookupReturningNullIfNotFound(currentFieldName, context.getParseFieldMatcher());
if (significanceHeuristicParser != null) {
- SignificanceHeuristic significanceHeuristic = significanceHeuristicParser.parse(parser, parseFieldMatcher);
+ SignificanceHeuristic significanceHeuristic = significanceHeuristicParser.parse(context);
otherOptions.put(SignificantTermsAggregationBuilder.HEURISTIC, significanceHeuristic);
return true;
- } else if (parseFieldMatcher.match(currentFieldName, SignificantTermsAggregationBuilder.BACKGROUND_FILTER)) {
- QueryParseContext queryParseContext = new QueryParseContext(queriesRegistry, parser, parseFieldMatcher);
+ } else if (context.matchField(currentFieldName, SignificantTermsAggregationBuilder.BACKGROUND_FILTER)) {
+ QueryParseContext queryParseContext = new QueryParseContext(context.getDefaultScriptLanguage(), queriesRegistry,
+ context.getParser(), context.getParseFieldMatcher());
Optional<QueryBuilder> filter = queryParseContext.parseInnerQueryBuilder();
if (filter.isPresent()) {
otherOptions.put(SignificantTermsAggregationBuilder.BACKGROUND_FILTER, filter.get());
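Note that QueryParseContext's constructor also grew a leading default-script-language argument here, threaded through from the parse context. The new construction, as used above (fragment for illustration):

    // Arguments taken from the XContentParseContext.
    QueryParseContext queryParseContext = new QueryParseContext(
            context.getDefaultScriptLanguage(),  // new leading argument
            queriesRegistry,
            context.getParser(),
            context.getParseFieldMatcher());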
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
index d8610dc05c..3ae26639aa 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
@@ -22,12 +22,12 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import java.io.IOException;
@@ -113,13 +113,13 @@ public class GND extends NXYSignificanceHeuristic {
}
@Override
- public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
- throws IOException, QueryShardException {
+ public SignificanceHeuristic parse(XContentParseContext context) throws IOException, QueryShardException {
+ XContentParser parser = context.getParser();
String givenName = parser.currentName();
boolean backgroundIsSuperset = true;
XContentParser.Token token = parser.nextToken();
while (!token.equals(XContentParser.Token.END_OBJECT)) {
- if (parseFieldMatcher.match(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
+ if (context.matchField(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
parser.nextToken();
backgroundIsSuperset = parser.booleanValue();
} else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
index d426b14662..58f8060a10 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
@@ -22,12 +22,12 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import java.io.IOException;
@@ -104,8 +104,9 @@ public class JLHScore extends SignificanceHeuristic {
return builder;
}
- public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+ public static SignificanceHeuristic parse(XContentParseContext context)
throws IOException, QueryShardException {
+ XContentParser parser = context.getParser();
// move to the closing bracket
if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
throw new ElasticsearchParseException(
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
index 3036c57865..d6064ca37f 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
@@ -23,12 +23,12 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import java.io.IOException;
@@ -152,17 +152,18 @@ public abstract class NXYSignificanceHeuristic extends SignificanceHeuristic {
public abstract static class NXYParser implements SignificanceHeuristicParser {
@Override
- public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+ public SignificanceHeuristic parse(XContentParseContext context)
throws IOException, QueryShardException {
+ XContentParser parser = context.getParser();
String givenName = parser.currentName();
boolean includeNegatives = false;
boolean backgroundIsSuperset = true;
XContentParser.Token token = parser.nextToken();
while (!token.equals(XContentParser.Token.END_OBJECT)) {
- if (parseFieldMatcher.match(parser.currentName(), INCLUDE_NEGATIVES_FIELD)) {
+ if (context.matchField(parser.currentName(), INCLUDE_NEGATIVES_FIELD)) {
parser.nextToken();
includeNegatives = parser.booleanValue();
- } else if (parseFieldMatcher.match(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
+ } else if (context.matchField(parser.currentName(), BACKGROUND_IS_SUPERSET)) {
parser.nextToken();
backgroundIsSuperset = parser.booleanValue();
} else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java
index 7bc117a0ec..c7e5c7ead6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java
@@ -22,12 +22,12 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import java.io.IOException;
@@ -56,8 +56,9 @@ public class PercentageScore extends SignificanceHeuristic {
return builder;
}
- public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+ public static SignificanceHeuristic parse(XContentParseContext context)
throws IOException, QueryShardException {
+ XContentParser parser = context.getParser();
// move to the closing bracket
if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
throw new ElasticsearchParseException("failed to parse [percentage] significance heuristic. expected an empty object, but got [{}] instead", parser.currentToken());
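Editor's note: JLHScore and PercentageScore take no options, so under the new signature their parse methods only consume the empty object that names the heuristic. A condensed sketch of that shared shape (only the validation appears in the hunks above; the return tail is assumed):

    public static SignificanceHeuristic parse(XContentParseContext context)
            throws IOException, QueryShardException {
        XContentParser parser = context.getParser();
        // "jlh": {}  -- the very next token must close the object
        if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
            throw new ElasticsearchParseException(
                "failed to parse [jlh] significance heuristic. expected an empty object, but got [{}] instead",
                parser.currentToken());
        }
        return new JLHScore();   // assumed tail: a parameter-free heuristic instance
    }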
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
index 1f99ebad21..c933f9ef59 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
@@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLoggerFactory;
@@ -35,6 +34,7 @@ import org.elasticsearch.script.Script.ScriptField;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
@@ -146,8 +146,9 @@ public class ScriptHeuristic extends SignificanceHeuristic {
return Objects.equals(script, other.script);
}
- public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+ public static SignificanceHeuristic parse(XContentParseContext context)
throws IOException, QueryShardException {
+ XContentParser parser = context.getParser();
String heuristicName = parser.currentName();
Script script = null;
XContentParser.Token token;
@@ -156,8 +157,8 @@ public class ScriptHeuristic extends SignificanceHeuristic {
if (token.equals(XContentParser.Token.FIELD_NAME)) {
currentFieldName = parser.currentName();
} else {
- if (parseFieldMatcher.match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, parseFieldMatcher);
+ if (context.matchField(currentFieldName, ScriptField.SCRIPT)) {
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else {
throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. unknown object [{}]", heuristicName, currentFieldName);
}
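Editor's note: ScriptHeuristic now threads context.getDefaultScriptLanguage() into Script.parse, so an inline script that omits "lang" picks up the configured default instead of a hard-coded one. The implied resolution order, as a sketch (an assumption about Script.parse, which is outside this excerpt; resolveLang is a hypothetical helper):

    // Assumed precedence, inferred from the call-site change above.
    static String resolveLang(String explicitLang, XContentParseContext context) {
        return explicitLang != null ? explicitLang : context.getDefaultScriptLanguage();
    }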
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java
index 1e1f4bfd48..26fd552a6b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java
@@ -20,9 +20,9 @@
package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import java.io.IOException;
@@ -31,6 +31,5 @@ import java.io.IOException;
*/
@FunctionalInterface
public interface SignificanceHeuristicParser {
- SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException,
- ParsingException;
+ SignificanceHeuristic parse(XContentParseContext context) throws IOException, ParsingException;
}
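Editor's note: with a single abstract method and the @FunctionalInterface annotation, heuristic parsers can now be passed around as lambdas or method references. A small usage sketch (QueryShardException is unchecked, so JLHScore::parse satisfies the interface's narrower throws clause):

    // Usage sketch: bind a static parse method to the functional interface.
    SignificanceHeuristicParser jlhParser = JLHScore::parse;
    SignificanceHeuristic heuristic = jlhParser.parse(context);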
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
index 3f27c4f1c6..a106cea3a1 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
@@ -20,13 +20,13 @@
package org.elasticsearch.search.aggregations.bucket.terms;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
@@ -89,47 +89,48 @@ public abstract class AbstractTermsParser extends AnyValuesSourceParser {
Map<ParseField, Object> otherOptions);
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
- if (incExcParser.token(currentFieldName, token, parser, parseFieldMatcher, otherOptions)) {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
+ if (incExcParser.token(currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
return true;
} else if (token == XContentParser.Token.VALUE_STRING) {
- if (parseFieldMatcher.match(currentFieldName, EXECUTION_HINT_FIELD_NAME)) {
+ if (context.matchField(currentFieldName, EXECUTION_HINT_FIELD_NAME)) {
otherOptions.put(EXECUTION_HINT_FIELD_NAME, parser.text());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, SubAggCollectionMode.KEY)) {
- otherOptions.put(SubAggCollectionMode.KEY, SubAggCollectionMode.parse(parser.text(), parseFieldMatcher));
+ } else if (context.matchField(currentFieldName, SubAggCollectionMode.KEY)) {
+ otherOptions.put(SubAggCollectionMode.KEY, SubAggCollectionMode.parse(parser.text(), context.getParseFieldMatcher()));
return true;
- } else if (parseFieldMatcher.match(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
+ } else if (context.matchField(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
otherOptions.put(REQUIRED_SIZE_FIELD_NAME, parser.intValue());
return true;
- } else if (parseSpecial(aggregationName, parser, parseFieldMatcher, token, currentFieldName, otherOptions)) {
+ } else if (parseSpecial(aggregationName, context, token, currentFieldName, otherOptions)) {
return true;
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
- if (parseFieldMatcher.match(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
+ if (context.matchField(currentFieldName, REQUIRED_SIZE_FIELD_NAME)) {
otherOptions.put(REQUIRED_SIZE_FIELD_NAME, parser.intValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, SHARD_SIZE_FIELD_NAME)) {
+ } else if (context.matchField(currentFieldName, SHARD_SIZE_FIELD_NAME)) {
otherOptions.put(SHARD_SIZE_FIELD_NAME, parser.intValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, MIN_DOC_COUNT_FIELD_NAME)) {
+ } else if (context.matchField(currentFieldName, MIN_DOC_COUNT_FIELD_NAME)) {
otherOptions.put(MIN_DOC_COUNT_FIELD_NAME, parser.longValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, SHARD_MIN_DOC_COUNT_FIELD_NAME)) {
+ } else if (context.matchField(currentFieldName, SHARD_MIN_DOC_COUNT_FIELD_NAME)) {
otherOptions.put(SHARD_MIN_DOC_COUNT_FIELD_NAME, parser.longValue());
return true;
- } else if (parseSpecial(aggregationName, parser, parseFieldMatcher, token, currentFieldName, otherOptions)) {
+ } else if (parseSpecial(aggregationName, context, token, currentFieldName, otherOptions)) {
return true;
}
- } else if (parseSpecial(aggregationName, parser, parseFieldMatcher, token, currentFieldName, otherOptions)) {
+ } else if (parseSpecial(aggregationName, context, token, currentFieldName, otherOptions)) {
return true;
}
return false;
}
- public abstract boolean parseSpecial(String aggregationName, XContentParser parser, ParseFieldMatcher parseFieldMatcher,
- XContentParser.Token token, String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException;
+ public abstract boolean parseSpecial(String aggregationName, XContentParseContext context,
+ Token token, String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException;
protected abstract TermsAggregator.BucketCountThresholds getDefaultBucketCountThresholds();
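Editor's note: token(...) above keeps the shared terms options (execution_hint, collect mode, size, shard_size, min_doc_count, shard_min_doc_count) and delegates anything else to parseSpecial, which now also receives the context. A hedged sketch of a concrete subclass hook (class and field names are hypothetical; the remaining abstract members of AbstractTermsParser are omitted for brevity):

    public class MyTermsParser extends AbstractTermsParser {
        private static final ParseField MY_OPTION = new ParseField("my_option");

        @Override
        public boolean parseSpecial(String aggregationName, XContentParseContext context,
                                    Token token, String currentFieldName,
                                    Map<ParseField, Object> otherOptions) throws IOException {
            if (token == Token.VALUE_BOOLEAN && context.matchField(currentFieldName, MY_OPTION)) {
                otherOptions.put(MY_OPTION, context.getParser().booleanValue());
                return true;
            }
            return false;
        }
        // remaining abstract members of AbstractTermsParser omitted for brevity
    }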
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
index 2a67dbe221..bf8b06ab65 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.bucket.terms;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -27,6 +26,7 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -75,15 +75,16 @@ public class TermsParser extends AbstractTermsParser {
}
@Override
- public boolean parseSpecial(String aggregationName, XContentParser parser, ParseFieldMatcher parseFieldMatcher, Token token,
- String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
+ public boolean parseSpecial(String aggregationName, XContentParseContext context, Token token,
+ String currentFieldName, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
if (token == XContentParser.Token.START_OBJECT) {
- if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
+ if (context.matchField(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
otherOptions.put(TermsAggregationBuilder.ORDER_FIELD, Collections.singletonList(parseOrderParam(aggregationName, parser)));
return true;
}
} else if (token == XContentParser.Token.START_ARRAY) {
- if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
+ if (context.matchField(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
List<OrderElement> orderElements = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.START_OBJECT) {
@@ -98,7 +99,7 @@ public class TermsParser extends AbstractTermsParser {
return true;
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
- if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR)) {
+ if (context.matchField(currentFieldName, TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR)) {
otherOptions.put(TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR, parser.booleanValue());
return true;
}
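Editor's note: for reference, the three branches above correspond to the following request shapes, with an equivalent builder call (method names per the 5.x terms builder API, hedged):

    //   "order": { "_count": "desc" }                          -> START_OBJECT branch
    //   "order": [ { "_count": "desc" }, { "_term": "asc" } ]  -> START_ARRAY branch
    //   "show_term_doc_count_error": true                      -> VALUE_BOOLEAN branch
    TermsAggregationBuilder terms = AggregationBuilders.terms("tags")
        .field("tag")
        .order(Terms.Order.count(false))   // descending doc count
        .showTermDocCountError(true);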
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
index b4f9261b1e..bc6f762295 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.aggregations.metrics.avg;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -38,8 +38,8 @@ public class AvgParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
index 3a2e6a2072..e40e076799 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
@@ -20,10 +20,9 @@
package org.elasticsearch.search.aggregations.metrics.cardinality;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -51,13 +50,13 @@ public class CardinalityParser extends AnyValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
if (token.isValue()) {
- if (parseFieldMatcher.match(currentFieldName, CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD)) {
- otherOptions.put(CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD, parser.longValue());
+ if (context.matchField(currentFieldName, CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD)) {
+ otherOptions.put(CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD, context.getParser().longValue());
return true;
- } else if (parseFieldMatcher.match(currentFieldName, REHASH)) {
+ } else if (context.matchField(currentFieldName, REHASH)) {
// ignore
return true;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java
index 7420fc0149..c42de23949 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java
@@ -20,10 +20,10 @@
package org.elasticsearch.search.aggregations.metrics.geobounds;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -48,11 +48,11 @@ public class GeoBoundsParser extends GeoPointValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
if (token == XContentParser.Token.VALUE_BOOLEAN) {
- if (parseFieldMatcher.match(currentFieldName, GeoBoundsAggregator.WRAP_LONGITUDE_FIELD)) {
- otherOptions.put(GeoBoundsAggregator.WRAP_LONGITUDE_FIELD, parser.booleanValue());
+ if (context.matchField(currentFieldName, GeoBoundsAggregator.WRAP_LONGITUDE_FIELD)) {
+ otherOptions.put(GeoBoundsAggregator.WRAP_LONGITUDE_FIELD, context.getParser().booleanValue());
return true;
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java
index 6c9e9ba67b..8e88a11c6b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java
@@ -20,10 +20,9 @@
package org.elasticsearch.search.aggregations.metrics.geocentroid;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -40,8 +39,8 @@ public class GeoCentroidParser extends GeoPointValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
index d2ddd4daa0..f0290e93fa 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.aggregations.metrics.max;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -38,8 +38,8 @@ public class MaxParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
index 194c08fc49..4381ca4189 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
@@ -19,10 +19,9 @@
package org.elasticsearch.search.aggregations.metrics.min;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -39,8 +38,8 @@ public class MinParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java
index ec145754a0..053a415c97 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java
@@ -21,10 +21,10 @@ package org.elasticsearch.search.aggregations.metrics.percentiles;
import com.carrotsearch.hppc.DoubleArrayList;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
@@ -45,10 +45,11 @@ public abstract class AbstractPercentilesParser extends NumericValuesSourceParse
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ XContentParser parser = context.getParser();
if (token == XContentParser.Token.START_ARRAY) {
- if (parseFieldMatcher.match(currentFieldName, keysField())) {
+ if (context.matchField(currentFieldName, keysField())) {
DoubleArrayList values = new DoubleArrayList(10);
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
double value = parser.doubleValue();
@@ -61,7 +62,7 @@ public abstract class AbstractPercentilesParser extends NumericValuesSourceParse
return false;
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
- if (parseFieldMatcher.match(currentFieldName, KEYED_FIELD)) {
+ if (context.matchField(currentFieldName, KEYED_FIELD)) {
boolean keyed = parser.booleanValue();
otherOptions.put(KEYED_FIELD, keyed);
return true;
@@ -80,7 +81,7 @@ public abstract class AbstractPercentilesParser extends NumericValuesSourceParse
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
- if (parseFieldMatcher.match(currentFieldName, COMPRESSION_FIELD)) {
+ if (context.matchField(currentFieldName, COMPRESSION_FIELD)) {
double compression = parser.doubleValue();
otherOptions.put(COMPRESSION_FIELD, compression);
} else {
@@ -96,7 +97,7 @@ public abstract class AbstractPercentilesParser extends NumericValuesSourceParse
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
- if (parseFieldMatcher.match(currentFieldName, NUMBER_SIGNIFICANT_DIGITS_FIELD)) {
+ if (context.matchField(currentFieldName, NUMBER_SIGNIFICANT_DIGITS_FIELD)) {
int numberOfSignificantValueDigits = parser.intValue();
otherOptions.put(NUMBER_SIGNIFICANT_DIGITS_FIELD, numberOfSignificantValueDigits);
} else {
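Editor's note: the per-method objects parsed above map to these request shapes (a sketch assembled from the ParseField names in this hunk, not a committed example):

    //   "percentiles": { "field": "latency", "percents": [95, 99, 99.9], "keyed": false,
    //                    "tdigest": { "compression": 200 } }
    //   "percentiles": { "field": "latency",
    //                    "hdr": { "number_of_significant_value_digits": 3 } }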
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java
index 8320774da0..e4811902f8 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java
@@ -180,7 +180,7 @@ public class PercentileRanksAggregationBuilder extends LeafOnly<ValuesSource.Num
@Override
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
- builder.field(PercentileRanksParser.VALUES_FIELD.getPreferredName(), values);
+ builder.array(PercentileRanksParser.VALUES_FIELD.getPreferredName(), values);
builder.field(AbstractPercentilesParser.KEYED_FIELD.getPreferredName(), keyed);
builder.startObject(method.getName());
if (method == PercentilesMethod.TDIGEST) {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java
index f3fd9ad744..13d9b919f2 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java
@@ -180,7 +180,7 @@ public class PercentilesAggregationBuilder extends LeafOnly<ValuesSource.Numeric
@Override
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
- builder.field(PercentilesParser.PERCENTS_FIELD.getPreferredName(), percents);
+ builder.array(PercentilesParser.PERCENTS_FIELD.getPreferredName(), percents);
builder.field(AbstractPercentilesParser.KEYED_FIELD.getPreferredName(), keyed);
builder.startObject(method.getName());
if (method == PercentilesMethod.TDIGEST) {
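Editor's note: the percents/values keys always hold a JSON array, and both builders now use the dedicated array(...) overload for primitive arrays instead of routing the double[] through the generic field(...) call. Minimal illustration of the output (a sketch, not from the patch; uses org.elasticsearch.common.xcontent.XContentBuilder/XContentFactory):

    static String percentsJson() throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
        builder.array("percents", new double[] {95.0, 99.0, 99.9});
        builder.endObject();
        return builder.string();   // {"percents":[95.0,99.0,99.9]}
    }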
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java
index ec0b2aef61..244881a515 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java
@@ -232,13 +232,13 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.VALUE_STRING) {
if (context.getParseFieldMatcher().match(currentFieldName, INIT_SCRIPT_FIELD)) {
- initScript = Script.parse(parser, context.getParseFieldMatcher());
+ initScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (context.getParseFieldMatcher().match(currentFieldName, MAP_SCRIPT_FIELD)) {
- mapScript = Script.parse(parser, context.getParseFieldMatcher());
+ mapScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (context.getParseFieldMatcher().match(currentFieldName, COMBINE_SCRIPT_FIELD)) {
- combineScript = Script.parse(parser, context.getParseFieldMatcher());
+ combineScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (context.getParseFieldMatcher().match(currentFieldName, REDUCE_SCRIPT_FIELD)) {
- reduceScript = Script.parse(parser, context.getParseFieldMatcher());
+ reduceScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (token == XContentParser.Token.START_OBJECT &&
context.getParseFieldMatcher().match(currentFieldName, PARAMS_FIELD)) {
params = parser.map();
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java
index eacfc0068b..60e3d2ef0a 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.aggregations.metrics.stats;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -38,8 +38,8 @@ public class StatsParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
index c650847360..9644d26e93 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.aggregations.metrics.stats.extended;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -38,11 +38,11 @@ public class ExtendedStatsParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
- if (parseFieldMatcher.match(currentFieldName, ExtendedStatsAggregator.SIGMA_FIELD)) {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
+ if (context.matchField(currentFieldName, ExtendedStatsAggregator.SIGMA_FIELD)) {
if (token.isValue()) {
- otherOptions.put(ExtendedStatsAggregator.SIGMA_FIELD, parser.doubleValue());
+ otherOptions.put(ExtendedStatsAggregator.SIGMA_FIELD, context.getParser().doubleValue());
return true;
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
index 6edc6cc890..ee82829b0a 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.aggregations.metrics.sum;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -38,8 +38,8 @@ public class SumParser extends NumericValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java
index c72f0d3eb9..3547db7140 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java
@@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation.Type;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
@@ -63,7 +64,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
private boolean trackScores = false;
private List<SortBuilder<?>> sorts = null;
private HighlightBuilder highlightBuilder;
- private List<String> fieldNames;
+ private StoredFieldsContext storedFieldsContext;
private List<String> fieldDataFields;
private Set<ScriptField> scriptFields;
private FetchSourceContext fetchSourceContext;
@@ -78,7 +79,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
public TopHitsAggregationBuilder(StreamInput in) throws IOException {
super(in, TYPE);
explain = in.readBoolean();
- fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
+ fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
if (in.readBoolean()) {
int size = in.readVInt();
fieldDataFields = new ArrayList<>(size);
@@ -86,13 +87,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
fieldDataFields.add(in.readString());
}
}
- if (in.readBoolean()) {
- int size = in.readVInt();
- fieldNames = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- fieldNames.add(in.readString());
- }
- }
+ storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new);
from = in.readVInt();
highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new);
if (in.readBoolean()) {
@@ -117,7 +112,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeBoolean(explain);
- out.writeOptionalStreamable(fetchSourceContext);
+ out.writeOptionalWriteable(fetchSourceContext);
boolean hasFieldDataFields = fieldDataFields != null;
out.writeBoolean(hasFieldDataFields);
if (hasFieldDataFields) {
@@ -126,14 +121,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
out.writeString(fieldName);
}
}
- boolean hasFieldNames = fieldNames != null;
- out.writeBoolean(hasFieldNames);
- if (hasFieldNames) {
- out.writeVInt(fieldNames.size());
- for (String fieldName : fieldNames) {
- out.writeString(fieldName);
- }
- }
+ out.writeOptionalWriteable(storedFieldsContext);
out.writeVInt(from);
out.writeOptionalWriteable(highlightBuilder);
boolean hasScriptFields = scriptFields != null;
@@ -355,47 +343,34 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
}
/**
- * Adds a field to load and return (note, it must be stored) as part of
- * the search request. If none are specified, the source of the document
- * will be return.
+ * Adds a stored field to load and return (note, it must be stored) as part of the search request.
+ * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}.
*/
- public TopHitsAggregationBuilder field(String field) {
- if (field == null) {
- throw new IllegalArgumentException("[field] must not be null: [" + name + "]");
- }
- if (fieldNames == null) {
- fieldNames = new ArrayList<>();
- }
- fieldNames.add(field);
- return this;
+ public TopHitsAggregationBuilder storedField(String field) {
+ return storedFields(Collections.singletonList(field));
}
/**
- * Sets the fields to load and return as part of the search request. If
- * none are specified, the source of the document will be returned.
+ * Sets the stored fields to load and return as part of the search request.
+ * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}.
*/
- public TopHitsAggregationBuilder fields(List<String> fields) {
+ public TopHitsAggregationBuilder storedFields(List<String> fields) {
if (fields == null) {
throw new IllegalArgumentException("[fields] must not be null: [" + name + "]");
}
- this.fieldNames = fields;
- return this;
- }
-
- /**
- * Sets no fields to be loaded, resulting in only id and type to be
- * returned per field.
- */
- public TopHitsAggregationBuilder noFields() {
- this.fieldNames = Collections.emptyList();
+ if (storedFieldsContext == null) {
+ storedFieldsContext = StoredFieldsContext.fromList(fields);
+ } else {
+ storedFieldsContext.addFieldNames(fields);
+ }
return this;
}
/**
- * Gets the fields to load and return as part of the search request.
+ * Gets the stored fields context
*/
- public List<String> fields() {
- return fieldNames;
+ public StoredFieldsContext storedFields() {
+ return storedFieldsContext;
}
/**
@@ -552,8 +527,9 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
@Override
protected TopHitsAggregatorFactory doBuild(AggregationContext context, AggregatorFactory<?> parent, Builder subfactoriesBuilder)
throws IOException {
- return new TopHitsAggregatorFactory(name, type, from, size, explain, version, trackScores, sorts, highlightBuilder, fieldNames,
- fieldDataFields, scriptFields, fetchSourceContext, context, parent, subfactoriesBuilder, metaData);
+ return new TopHitsAggregatorFactory(name, type, from, size, explain, version, trackScores, sorts, highlightBuilder,
+ storedFieldsContext, fieldDataFields, scriptFields, fetchSourceContext, context,
+ parent, subfactoriesBuilder, metaData);
}
@Override
@@ -566,16 +542,8 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
if (fetchSourceContext != null) {
builder.field(SearchSourceBuilder._SOURCE_FIELD.getPreferredName(), fetchSourceContext);
}
- if (fieldNames != null) {
- if (fieldNames.size() == 1) {
- builder.field(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), fieldNames.get(0));
- } else {
- builder.startArray(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName());
- for (String fieldName : fieldNames) {
- builder.value(fieldName);
- }
- builder.endArray();
- }
+ if (storedFieldsContext != null) {
+ storedFieldsContext.toXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), builder);
}
if (fieldDataFields != null) {
builder.startArray(SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.getPreferredName());
@@ -628,11 +596,10 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.TRACK_SCORES_FIELD)) {
factory.trackScores(parser.booleanValue());
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) {
- factory.fetchSource(FetchSourceContext.parse(context));
+ factory.fetchSource(FetchSourceContext.parse(context.parser()));
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.STORED_FIELDS_FIELD)) {
- List<String> fieldNames = new ArrayList<>();
- fieldNames.add(parser.text());
- factory.fields(fieldNames);
+ factory.storedFieldsContext =
+ StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context);
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.SORT_FIELD)) {
factory.sort(parser.text());
} else {
@@ -641,7 +608,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) {
- factory.fetchSource(FetchSourceContext.parse(context));
+ factory.fetchSource(FetchSourceContext.parse(context.parser()));
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.SCRIPT_FIELDS_FIELD)) {
List<ScriptField> scriptFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -655,7 +622,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.SCRIPT_FIELD)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (context.getParseFieldMatcher().match(currentFieldName,
SearchSourceBuilder.IGNORE_FAILURE_FIELD)) {
ignoreFailure = parser.booleanValue();
@@ -666,7 +633,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.SCRIPT_FIELD)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unknown key for a " + token + " in [" + currentFieldName + "].",
@@ -696,16 +663,8 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
} else if (token == XContentParser.Token.START_ARRAY) {
if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.STORED_FIELDS_FIELD)) {
- List<String> fieldNames = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- if (token == XContentParser.Token.VALUE_STRING) {
- fieldNames.add(parser.text());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING
- + "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
- }
- }
- factory.fields(fieldNames);
+ factory.storedFieldsContext =
+ StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context);
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.DOCVALUE_FIELDS_FIELD)) {
List<String> fieldDataFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
@@ -721,7 +680,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
List<SortBuilder<?>> sorts = SortBuilder.fromXContent(context);
factory.sorts(sorts);
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) {
- factory.fetchSource(FetchSourceContext.parse(context));
+ factory.fetchSource(FetchSourceContext.parse(context.parser()));
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());
@@ -736,8 +695,8 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
@Override
protected int doHashCode() {
- return Objects.hash(explain, fetchSourceContext, fieldDataFields, fieldNames, from, highlightBuilder, scriptFields, size, sorts,
- trackScores, version);
+ return Objects.hash(explain, fetchSourceContext, fieldDataFields, storedFieldsContext, from, highlightBuilder,
+ scriptFields, size, sorts, trackScores, version);
}
@Override
@@ -746,7 +705,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
return Objects.equals(explain, other.explain)
&& Objects.equals(fetchSourceContext, other.fetchSourceContext)
&& Objects.equals(fieldDataFields, other.fieldDataFields)
- && Objects.equals(fieldNames, other.fieldNames)
+ && Objects.equals(storedFieldsContext, other.storedFieldsContext)
&& Objects.equals(from, other.from)
&& Objects.equals(highlightBuilder, other.highlightBuilder)
&& Objects.equals(scriptFields, other.scriptFields)
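Editor's note: the TopHits changes fold the loose List<String> fieldNames plus its hand-rolled wire format (boolean, size, strings) into a single writeOptionalWriteable(storedFieldsContext) call, move FetchSourceContext from Streamable to Writeable (readOptionalStreamable -> readOptionalWriteable), and replace field/fields/noFields with the storedField(s) API. A hedged usage sketch of the new surface (builder names from this hunk):

    TopHitsAggregationBuilder latest = new TopHitsAggregationBuilder("latest")
        .storedField("title")                           // single stored field
        .storedFields(Arrays.asList("date", "user"));   // appended to the same context
    // The _none_ sentinel disables stored fields entirely (no _source, no metadata):
    TopHitsAggregationBuilder idsOnly = new TopHitsAggregationBuilder("ids_only")
        .storedField("_none_");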
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java
index 70a9269547..7c6a743a20 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java
@@ -28,10 +28,9 @@ import org.elasticsearch.search.aggregations.InternalAggregation.Type;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
-import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
-import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.DocValueField;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.SubSearchContext;
import org.elasticsearch.search.sort.SortAndFormats;
@@ -53,15 +52,16 @@ public class TopHitsAggregatorFactory extends AggregatorFactory<TopHitsAggregato
private final boolean trackScores;
private final List<SortBuilder<?>> sorts;
private final HighlightBuilder highlightBuilder;
- private final List<String> fieldNames;
+ private final StoredFieldsContext storedFieldsContext;
private final List<String> docValueFields;
private final Set<ScriptField> scriptFields;
private final FetchSourceContext fetchSourceContext;
public TopHitsAggregatorFactory(String name, Type type, int from, int size, boolean explain, boolean version, boolean trackScores,
- List<SortBuilder<?>> sorts, HighlightBuilder highlightBuilder, List<String> fieldNames, List<String> docValueFields,
- Set<ScriptField> scriptFields, FetchSourceContext fetchSourceContext, AggregationContext context, AggregatorFactory<?> parent,
- AggregatorFactories.Builder subFactories, Map<String, Object> metaData) throws IOException {
+ List<SortBuilder<?>> sorts, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext,
+ List<String> docValueFields, Set<ScriptField> scriptFields, FetchSourceContext fetchSourceContext,
+ AggregationContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories,
+ Map<String, Object> metaData) throws IOException {
super(name, type, context, parent, subFactories, metaData);
this.from = from;
this.size = size;
@@ -70,7 +70,7 @@ public class TopHitsAggregatorFactory extends AggregatorFactory<TopHitsAggregato
this.trackScores = trackScores;
this.sorts = sorts;
this.highlightBuilder = highlightBuilder;
- this.fieldNames = fieldNames;
+ this.storedFieldsContext = storedFieldsContext;
this.docValueFields = docValueFields;
this.scriptFields = scriptFields;
this.fetchSourceContext = fetchSourceContext;
@@ -92,16 +92,11 @@ public class TopHitsAggregatorFactory extends AggregatorFactory<TopHitsAggregato
subSearchContext.sort(optionalSort.get());
}
}
- if (fieldNames != null) {
- subSearchContext.fieldNames().addAll(fieldNames);
+ if (storedFieldsContext != null) {
+ subSearchContext.storedFieldsContext(storedFieldsContext);
}
if (docValueFields != null) {
- DocValueFieldsContext docValueFieldsContext = subSearchContext
- .getFetchSubPhaseContext(DocValueFieldsFetchSubPhase.CONTEXT_FACTORY);
- for (String field : docValueFields) {
- docValueFieldsContext.add(new DocValueField(field));
- }
- docValueFieldsContext.setHitExecutionNeeded(true);
+ subSearchContext.docValueFieldsContext(new DocValueFieldsContext(docValueFields));
}
if (scriptFields != null) {
for (ScriptField field : scriptFields) {
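Editor's note: on the factory side, doc-value fields no longer accumulate one by one in a mutable sub-phase context fetched via CONTEXT_FACTORY; the whole list is handed to the sub-search context at once. The reworked DocValueFieldsContext is outside this excerpt; sketched from its usage here, it reduces to an immutable holder (an assumption, not the committed class):

    public class DocValueFieldsContext {
        private final List<String> fields;

        public DocValueFieldsContext(List<String> fields) {
            this.fields = fields;
        }

        public List<String> fields() {
            return fields;
        }
    }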
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
index fe8a34f242..bd61e276fe 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.aggregations.metrics.valuecount;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
@@ -40,8 +40,8 @@ public class ValueCountParser extends AnyValuesSourceParser {
}
@Override
- protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
+ protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
return false;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java
index 435d0239cb..81df16f2bf 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java
@@ -113,7 +113,7 @@ public class PercentilesBucketPipelineAggregationBuilder
@Override
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
if (percents != null) {
- builder.field(PERCENTS_FIELD.getPreferredName(), percents);
+ builder.array(PERCENTS_FIELD.getPreferredName(), percents);
}
return builder;
}
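Note: percents is a double[], so it is now written with the dedicated array(...) overload instead of the generic field(...) method, presumably to route the primitive array through explicit JSON-array rendering rather than Object-based dispatch:

    builder.array(PERCENTS_FIELD.getPreferredName(), percents);   // renders "percents": [1.0, 5.0, 25.0]

The same field(...) to array(...) switch is applied to search_after later in this diff.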
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java
index cee17076e5..cd7b1bb828 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java
@@ -179,7 +179,7 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr
} else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) {
gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
} else if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "].");
@@ -201,7 +201,7 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) {
Map<String, Object> map = parser.map();
bucketsPathsMap = new HashMap<>();
@@ -260,4 +260,4 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr
public String getWriteableName() {
return NAME;
}
-} \ No newline at end of file
+}
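Note: Script.parse now receives the default script language from the parse context, so an inline script without an explicit lang falls back to the configured default instead of a hard-coded one. The call shape, taken directly from the hunks above:

    Script script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());

The identical change is repeated below for bucket_selector and for script fields in SearchSourceBuilder.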
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java
index 97cf02d69a..e3b4237672 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java
@@ -142,7 +142,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg
} else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) {
gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
} else if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "].");
@@ -164,7 +164,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) {
Map<String, Object> map = parser.map();
bucketsPathsMap = new HashMap<>();
@@ -219,4 +219,4 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg
public String getWriteableName() {
return NAME;
}
-} \ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java
index 51d2ea2e8c..57eea9ccf6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java
@@ -20,7 +20,6 @@
package org.elasticsearch.search.aggregations.support;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
@@ -95,6 +94,8 @@ public abstract class AbstractValuesSourceParser<VS extends ValuesSource>
Object missing = null;
DateTimeZone timezone = null;
Map<ParseField, Object> otherOptions = new HashMap<>();
+ XContentParseContext parserContext =
+ new XContentParseContext(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
XContentParser.Token token;
String currentFieldName = null;
@@ -126,22 +127,22 @@ public abstract class AbstractValuesSourceParser<VS extends ValuesSource>
+ valueType + "]. It can only work on value of type ["
+ targetValueType + "]");
}
- } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+ } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
throw new ParsingException(parser.getTokenLocation(),
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
}
- } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+ } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
throw new ParsingException(parser.getTokenLocation(),
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
}
} else if (scriptable && token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
- } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
+ } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
throw new ParsingException(parser.getTokenLocation(),
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
}
- } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) {
+ } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) {
throw new ParsingException(parser.getTokenLocation(),
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
}
@@ -184,8 +185,7 @@ public abstract class AbstractValuesSourceParser<VS extends ValuesSource>
* the target type of the final value output by the aggregation
* @param otherOptions
* a {@link Map} containing the extra options parsed by the
- * {@link #token(String, String, org.elasticsearch.common.xcontent.XContentParser.Token,
- * XContentParser, ParseFieldMatcher, Map)}
+ * {@link #token(String, String, XContentParser.Token, XContentParseContext, Map)}
* method
* @return the created factory
*/
@@ -203,10 +203,8 @@ public abstract class AbstractValuesSourceParser<VS extends ValuesSource>
* the name of the current field being parsed
* @param token
* the current token for the parser
- * @param parser
- * the parser
- * @param parseFieldMatcher
- * the {@link ParseFieldMatcher} to use to match field names
+ * @param context
+ *            the parse context wrapping the parser and its settings
* @param otherOptions
* a {@link Map} of options to be populated by successive calls
* to this method which will then be passed to the
@@ -217,6 +215,6 @@ public abstract class AbstractValuesSourceParser<VS extends ValuesSource>
* @throws IOException
* if an error occurs whilst parsing
*/
- protected abstract boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
- ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException;
+ protected abstract boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
+ XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/XContentParseContext.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/XContentParseContext.java
new file mode 100644
index 0000000000..07c33f1f47
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/XContentParseContext.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+/**
+ * A lightweight context for parsing xcontent into aggregation builders.
+ * It exposes only the minimal set of dependencies and settings that parsers need.
+ */
+public final class XContentParseContext {
+
+ private final XContentParser parser;
+
+ private final ParseFieldMatcher parseFieldMatcher;
+
+ private final String defaultScriptLanguage;
+
+ public XContentParseContext(XContentParser parser, ParseFieldMatcher parseFieldMatcher, String defaultScriptLanguage) {
+ this.parser = parser;
+ this.parseFieldMatcher = parseFieldMatcher;
+ this.defaultScriptLanguage = defaultScriptLanguage;
+ }
+
+ public XContentParser getParser() {
+ return parser;
+ }
+
+ public ParseFieldMatcher getParseFieldMatcher() {
+ return parseFieldMatcher;
+ }
+
+ public String getDefaultScriptLanguage() {
+ return defaultScriptLanguage;
+ }
+
+ /**
+ * Returns whether the given parse field matches the field name found in the xcontent.
+ *
+ * Helper that delegates to {@link ParseFieldMatcher#match(String, ParseField)}.
+ */
+ public boolean matchField(String fieldName, ParseField parseField) {
+ return parseFieldMatcher.match(fieldName, parseField);
+ }
+
+}
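Note: XContentParseContext bundles the parser, the ParseFieldMatcher and the default script language so that token(...) implementations stop taking them as separate arguments. A hedged sketch of a concrete parser using it; MY_FIELD is a hypothetical ParseField:

    @Override
    protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token,
                            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
        if (token == XContentParser.Token.VALUE_STRING && context.matchField(currentFieldName, MY_FIELD)) {
            // read the value through the wrapped parser rather than a separately passed one
            otherOptions.put(MY_FIELD, context.getParser().text());
            return true;   // token consumed
        }
        return false;      // unknown token: the caller raises a ParsingException
    }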
diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 4ae8eb89e0..c64a5fd552 100644
--- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -20,30 +20,31 @@
package org.elasticsearch.search.builder;
import com.carrotsearch.hppc.ObjectFloatHashMap;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.script.Script;
+import org.elasticsearch.search.SearchExtBuilder;
+import org.elasticsearch.search.SearchExtParser;
+import org.elasticsearch.search.SearchExtRegistry;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.SearchContext;
@@ -62,6 +63,10 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+import static org.elasticsearch.common.collect.Tuple.tuple;
/**
* A search source builder allowing to easily build search source. Simple
@@ -103,9 +108,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
public static final ParseField SLICE = new ParseField("slice");
public static SearchSourceBuilder fromXContent(QueryParseContext context, AggregatorParsers aggParsers,
- Suggesters suggesters) throws IOException {
+ Suggesters suggesters, SearchExtRegistry searchExtRegistry) throws IOException {
SearchSourceBuilder builder = new SearchSourceBuilder();
- builder.parseXContent(context, aggParsers, suggesters);
+ builder.parseXContent(context, aggParsers, suggesters, searchExtRegistry);
return builder;
}
@@ -148,7 +153,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
private TimeValue timeout = null;
private int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER;
- private List<String> storedFieldNames;
+ private StoredFieldsContext storedFieldsContext;
private List<String> docValueFields;
private List<ScriptField> scriptFields;
private FetchSourceContext fetchSourceContext;
@@ -159,13 +164,13 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
private SuggestBuilder suggestBuilder;
- private List<RescoreBuilder<?>> rescoreBuilders;
+ private List<RescoreBuilder> rescoreBuilders;
private ObjectFloatHashMap<String> indexBoost = null;
private List<String> stats;
- private BytesReference ext = null;
+ private List<SearchExtBuilder> extBuilders = Collections.emptyList();
private boolean profile = false;
@@ -182,16 +187,15 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
public SearchSourceBuilder(StreamInput in) throws IOException {
aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new);
explain = in.readOptionalBoolean();
- fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
+ fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
docValueFields = (List<String>) in.readGenericValue();
- storedFieldNames = (List<String>) in.readGenericValue();
+ storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new);
from = in.readVInt();
highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new);
- boolean hasIndexBoost = in.readBoolean();
- if (hasIndexBoost) {
- int size = in.readVInt();
- indexBoost = new ObjectFloatHashMap<>(size);
- for (int i = 0; i < size; i++) {
+ int indexBoostSize = in.readVInt();
+ if (indexBoostSize > 0) {
+ indexBoost = new ObjectFloatHashMap<>(indexBoostSize);
+ for (int i = 0; i < indexBoostSize; i++) {
indexBoost.put(in.readString(), in.readFloat());
}
}
@@ -199,18 +203,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
postQueryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class);
queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class);
if (in.readBoolean()) {
- int size = in.readVInt();
- rescoreBuilders = new ArrayList<>();
- for (int i = 0; i < size; i++) {
- rescoreBuilders.add(in.readNamedWriteable(RescoreBuilder.class));
- }
+ rescoreBuilders = in.readNamedWriteableList(RescoreBuilder.class);
}
if (in.readBoolean()) {
- int size = in.readVInt();
- scriptFields = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- scriptFields.add(new ScriptField(in));
- }
+ scriptFields = in.readList(ScriptField::new);
}
size = in.readVInt();
if (in.readBoolean()) {
@@ -221,18 +217,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
}
if (in.readBoolean()) {
- int size = in.readVInt();
- stats = new ArrayList<>();
- for (int i = 0; i < size; i++) {
- stats.add(in.readString());
- }
+ stats = in.readList(StreamInput::readString);
}
suggestBuilder = in.readOptionalWriteable(SuggestBuilder::new);
terminateAfter = in.readVInt();
timeout = in.readOptionalWriteable(TimeValue::new);
trackScores = in.readBoolean();
version = in.readOptionalBoolean();
- ext = in.readOptionalBytesReference();
+ extBuilders = in.readNamedWriteableList(SearchExtBuilder.class);
profile = in.readBoolean();
searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new);
sliceBuilder = in.readOptionalWriteable(SliceBuilder::new);
@@ -242,19 +234,15 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(aggregations);
out.writeOptionalBoolean(explain);
- out.writeOptionalStreamable(fetchSourceContext);
+ out.writeOptionalWriteable(fetchSourceContext);
out.writeGenericValue(docValueFields);
- out.writeGenericValue(storedFieldNames);
+ out.writeOptionalWriteable(storedFieldsContext);
out.writeVInt(from);
out.writeOptionalWriteable(highlightBuilder);
- boolean hasIndexBoost = indexBoost != null;
- out.writeBoolean(hasIndexBoost);
- if (hasIndexBoost) {
- out.writeVInt(indexBoost.size());
- for (ObjectCursor<String> key : indexBoost.keys()) {
- out.writeString(key.value);
- out.writeFloat(indexBoost.get(key.value));
- }
+ int indexBoostSize = indexBoost == null ? 0 : indexBoost.size();
+ out.writeVInt(indexBoostSize);
+ if (indexBoostSize > 0) {
+ writeIndexBoost(out);
}
out.writeOptionalFloat(minScore);
out.writeOptionalNamedWriteable(postQueryBuilder);
@@ -262,18 +250,12 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
boolean hasRescoreBuilders = rescoreBuilders != null;
out.writeBoolean(hasRescoreBuilders);
if (hasRescoreBuilders) {
- out.writeVInt(rescoreBuilders.size());
- for (RescoreBuilder<?> rescoreBuilder : rescoreBuilders) {
- out.writeNamedWriteable(rescoreBuilder);
- }
+ out.writeNamedWriteableList(rescoreBuilders);
}
boolean hasScriptFields = scriptFields != null;
out.writeBoolean(hasScriptFields);
if (hasScriptFields) {
- out.writeVInt(scriptFields.size());
- for (ScriptField scriptField : scriptFields) {
- scriptField.writeTo(out);
- }
+ out.writeList(scriptFields);
}
out.writeVInt(size);
boolean hasSorts = sorts != null;
@@ -287,22 +269,30 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
boolean hasStats = stats != null;
out.writeBoolean(hasStats);
if (hasStats) {
- out.writeVInt(stats.size());
- for (String stat : stats) {
- out.writeString(stat);
- }
+ out.writeStringList(stats);
}
out.writeOptionalWriteable(suggestBuilder);
out.writeVInt(terminateAfter);
out.writeOptionalWriteable(timeout);
out.writeBoolean(trackScores);
out.writeOptionalBoolean(version);
- out.writeOptionalBytesReference(ext);
+ out.writeNamedWriteableList(extBuilders);
out.writeBoolean(profile);
out.writeOptionalWriteable(searchAfterBuilder);
out.writeOptionalWriteable(sliceBuilder);
}
+ private void writeIndexBoost(StreamOutput out) throws IOException {
+ List<Tuple<String, Float>> ibs = StreamSupport
+ .stream(indexBoost.spliterator(), false)
+ .map(i -> tuple(i.key, i.value)).sorted((o1, o2) -> o1.v1().compareTo(o2.v1()))
+ .collect(Collectors.toList());
+ for (Tuple<String, Float> ib : ibs) {
+ out.writeString(ib.v1());
+ out.writeFloat(ib.v2());
+ }
+ }
+
/**
* Sets the search query for this request.
*
@@ -638,7 +628,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
/**
* Gets the bytes representing the rescore builders for this request.
*/
- public List<RescoreBuilder<?>> rescores() {
+ public List<RescoreBuilder> rescores() {
return rescoreBuilders;
}
@@ -711,11 +701,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
* return.
*/
public SearchSourceBuilder storedField(String name) {
- if (storedFieldNames == null) {
- storedFieldNames = new ArrayList<>();
- }
- storedFieldNames.add(name);
- return this;
+ return storedFields(Collections.singletonList(name));
}
/**
@@ -723,24 +709,27 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
* are specified, the source of the document will be returned.
*/
public SearchSourceBuilder storedFields(List<String> fields) {
- this.storedFieldNames = fields;
+ if (storedFieldsContext == null) {
+ storedFieldsContext = StoredFieldsContext.fromList(fields);
+ } else {
+ storedFieldsContext.addFieldNames(fields);
+ }
return this;
}
/**
- * Sets no stored fields to be loaded, resulting in only id and type to be returned
- * per field.
+ * Indicates how the stored fields should be fetched.
*/
- public SearchSourceBuilder noStoredFields() {
- this.storedFieldNames = Collections.emptyList();
+ public SearchSourceBuilder storedFields(StoredFieldsContext context) {
+ storedFieldsContext = context;
return this;
}
/**
- * Gets the stored fields to load and return as part of the search request.
+ * Gets the stored fields context.
*/
- public List<String> storedFields() {
- return storedFieldNames;
+ public StoredFieldsContext storedFields() {
+ return storedFieldsContext;
}
@@ -865,13 +854,13 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
return stats;
}
- public SearchSourceBuilder ext(XContentBuilder ext) {
- this.ext = ext.bytes();
+ public SearchSourceBuilder ext(List<SearchExtBuilder> searchExtBuilders) {
+ this.extBuilders = Objects.requireNonNull(searchExtBuilders, "searchExtBuilders must not be null");
return this;
}
- public BytesReference ext() {
- return ext;
+ public List<SearchExtBuilder> ext() {
+ return extBuilders;
}
/**
@@ -909,10 +898,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
SearchSourceBuilder rewrittenBuilder = new SearchSourceBuilder();
rewrittenBuilder.aggregations = aggregations;
rewrittenBuilder.explain = explain;
- rewrittenBuilder.ext = ext;
+ rewrittenBuilder.extBuilders = extBuilders;
rewrittenBuilder.fetchSourceContext = fetchSourceContext;
rewrittenBuilder.docValueFields = docValueFields;
- rewrittenBuilder.storedFieldNames = storedFieldNames;
+ rewrittenBuilder.storedFieldsContext = storedFieldsContext;
rewrittenBuilder.from = from;
rewrittenBuilder.highlightBuilder = highlightBuilder;
rewrittenBuilder.indexBoost = indexBoost;
@@ -938,17 +927,18 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
/**
* Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up
* different defaults than a regular SearchSourceBuilder would have and use
- * {@link #fromXContent(QueryParseContext, AggregatorParsers, Suggesters)} if you have normal defaults.
+ * {@link #fromXContent(QueryParseContext, AggregatorParsers, Suggesters, SearchExtRegistry)} if you have normal defaults.
*/
- public void parseXContent(QueryParseContext context, AggregatorParsers aggParsers, Suggesters suggesters)
+ public void parseXContent(QueryParseContext context, AggregatorParsers aggParsers,
+ Suggesters suggesters, SearchExtRegistry searchExtRegistry)
throws IOException {
XContentParser parser = context.parser();
XContentParser.Token token = parser.currentToken();
String currentFieldName = null;
if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) {
- throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.START_OBJECT + "] but found [" + token + "]",
- parser.getTokenLocation());
+ throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.START_OBJECT +
+ "] but found [" + token + "]", parser.getTokenLocation());
}
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@@ -971,9 +961,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (context.getParseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) {
trackScores = parser.booleanValue();
} else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
- fetchSourceContext = FetchSourceContext.parse(context);
+ fetchSourceContext = FetchSourceContext.parse(context.parser());
} else if (context.getParseFieldMatcher().match(currentFieldName, STORED_FIELDS_FIELD)) {
- storedField(parser.text());
+ storedFieldsContext =
+ StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context);
} else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
sort(parser.text());
} else if (context.getParseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
@@ -992,7 +983,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (context.getParseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) {
postQueryBuilder = context.parseInnerQueryBuilder().orElse(null);
} else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
- fetchSourceContext = FetchSourceContext.parse(context);
+ fetchSourceContext = FetchSourceContext.parse(context.parser());
} else if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) {
scriptFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -1006,8 +997,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (token.isValue()) {
indexBoost.put(currentFieldName, parser.floatValue());
} else {
- throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
- parser.getTokenLocation());
+ throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token +
+ " in [" + currentFieldName + "].", parser.getTokenLocation());
}
}
} else if (context.getParseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)
@@ -1023,8 +1014,23 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
rescoreBuilders = new ArrayList<>();
rescoreBuilders.add(RescoreBuilder.parseFromXContent(context));
} else if (context.getParseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
- XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
- ext = xContentBuilder.bytes();
+ extBuilders = new ArrayList<>();
+ String extSectionName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ extSectionName = parser.currentName();
+ } else {
+ SearchExtParser searchExtParser = searchExtRegistry.lookup(extSectionName,
+ context.getParseFieldMatcher(), parser.getTokenLocation());
+ SearchExtBuilder searchExtBuilder = searchExtParser.fromXContent(parser);
+ if (searchExtBuilder.getWriteableName().equals(extSectionName) == false) {
+ throw new IllegalStateException("The parsed [" + searchExtBuilder.getClass().getName() + "] object has a "
+ + "different writeable name compared to the name of the section that it was parsed from: found ["
+ + searchExtBuilder.getWriteableName() + "] expected [" + extSectionName + "]");
+ }
+ extBuilders.add(searchExtBuilder);
+ }
+ }
} else if (context.getParseFieldMatcher().match(currentFieldName, SLICE)) {
sliceBuilder = SliceBuilder.fromXContent(context);
} else {
@@ -1033,23 +1039,15 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (context.getParseFieldMatcher().match(currentFieldName, STORED_FIELDS_FIELD)) {
- storedFieldNames = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- if (token == XContentParser.Token.VALUE_STRING) {
- storedFieldNames.add(parser.text());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING + "] in ["
- + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
- }
- }
+ storedFieldsContext = StoredFieldsContext.fromXContent(STORED_FIELDS_FIELD.getPreferredName(), context);
} else if (context.getParseFieldMatcher().match(currentFieldName, DOCVALUE_FIELDS_FIELD)) {
docValueFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
docValueFields.add(parser.text());
} else {
- throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING + "] in ["
- + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
+ throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING +
+ "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
}
}
} else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
@@ -1065,12 +1063,12 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
if (token == XContentParser.Token.VALUE_STRING) {
stats.add(parser.text());
} else {
- throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING + "] in ["
- + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
+ throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING +
+ "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation());
}
}
} else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
- fetchSourceContext = FetchSourceContext.parse(context);
+ fetchSourceContext = FetchSourceContext.parse(context.parser());
} else if (context.getParseFieldMatcher().match(currentFieldName, SEARCH_AFTER)) {
searchAfterBuilder = SearchAfterBuilder.fromXContent(parser, context.getParseFieldMatcher());
} else if (context.getParseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
@@ -1141,16 +1139,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
builder.field(_SOURCE_FIELD.getPreferredName(), fetchSourceContext);
}
- if (storedFieldNames != null) {
- if (storedFieldNames.size() == 1) {
- builder.field(STORED_FIELDS_FIELD.getPreferredName(), storedFieldNames.get(0));
- } else {
- builder.startArray(STORED_FIELDS_FIELD.getPreferredName());
- for (String fieldName : storedFieldNames) {
- builder.value(fieldName);
- }
- builder.endArray();
- }
+ if (storedFieldsContext != null) {
+ storedFieldsContext.toXContent(STORED_FIELDS_FIELD.getPreferredName(), builder);
}
if (docValueFields != null) {
@@ -1182,7 +1172,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
if (searchAfterBuilder != null) {
- builder.field(SEARCH_AFTER.getPreferredName(), searchAfterBuilder.getSortValues());
+ builder.array(SEARCH_AFTER.getPreferredName(), searchAfterBuilder.getSortValues());
}
if (sliceBuilder != null) {
@@ -1226,12 +1216,12 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
builder.field(STATS_FIELD.getPreferredName(), stats);
}
- if (ext != null) {
- builder.field(EXT_FIELD.getPreferredName());
- try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(ext)) {
- parser.nextToken();
- builder.copyCurrentStructure(parser);
+ if (extBuilders != null) {
+ builder.startObject(EXT_FIELD.getPreferredName());
+ for (SearchExtBuilder extBuilder : extBuilders) {
+ extBuilder.toXContent(builder, params);
}
+ builder.endObject();
}
}
@@ -1278,7 +1268,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else if (context.getParseFieldMatcher().match(currentFieldName, IGNORE_FAILURE_FIELD)) {
ignoreFailure = parser.booleanValue();
} else {
@@ -1287,7 +1277,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) {
- script = Script.parse(parser, context.getParseFieldMatcher());
+ script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName
+ "].", parser.getTokenLocation());
@@ -1349,9 +1339,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
@Override
public int hashCode() {
- return Objects.hash(aggregations, explain, fetchSourceContext, docValueFields, storedFieldNames, from,
- highlightBuilder, indexBoost, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields,
- size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, profile);
+ return Objects.hash(aggregations, explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder,
+ indexBoost, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, size, sorts, searchAfterBuilder,
+ sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, profile, extBuilders);
}
@Override
@@ -1367,7 +1357,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
&& Objects.equals(explain, other.explain)
&& Objects.equals(fetchSourceContext, other.fetchSourceContext)
&& Objects.equals(docValueFields, other.docValueFields)
- && Objects.equals(storedFieldNames, other.storedFieldNames)
+ && Objects.equals(storedFieldsContext, other.storedFieldsContext)
&& Objects.equals(from, other.from)
&& Objects.equals(highlightBuilder, other.highlightBuilder)
&& Objects.equals(indexBoost, other.indexBoost)
@@ -1386,7 +1376,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
&& Objects.equals(timeout, other.timeout)
&& Objects.equals(trackScores, other.trackScores)
&& Objects.equals(version, other.version)
- && Objects.equals(profile, other.profile);
+ && Objects.equals(profile, other.profile)
+ && Objects.equals(extBuilders, other.extBuilders);
}
-
}
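Note: SearchSourceBuilder now models stored fields through StoredFieldsContext (new file below) and the ext section as typed SearchExtBuilder objects resolved from a SearchExtRegistry, replacing the opaque BytesReference. A hedged usage sketch; the field names and myExtBuilder are illustrative:

    SearchSourceBuilder source = new SearchSourceBuilder()
        .storedFields(Arrays.asList("title", "date"));    // merged into a StoredFieldsContext
    source.ext(Collections.singletonList(myExtBuilder));  // myExtBuilder: some SearchExtBuilder impl

Serialization follows suit: extBuilders and rescoreBuilders go through writeNamedWriteableList, and index boosts are written in sorted key order, presumably so that equal builders produce identical bytes.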
diff --git a/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
index de06655f41..1359be24a1 100644
--- a/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
@@ -37,7 +37,8 @@ import java.util.Collection;
import java.util.Iterator;
/**
- *
+ * Dfs phase of a search request, used to make scoring 100% accurate by collecting additional information from each shard before the query phase.
+ * The additional information is used to compare scores across shards consistently, since raw scores depend on shard-local factors (e.g. idf).
*/
public class DfsPhase implements SearchPhase {
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index b292a2e800..41ea0e294d 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -43,7 +43,6 @@ import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
-import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchPhase;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsFetchSubPhase;
@@ -62,11 +61,11 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.xcontent.XContentFactory.contentBuilder;
/**
- *
+ * Fetch phase of a search request, used to fetch the top matching documents identified after reducing all of the matches
+ * returned by the query phase, so that they can be returned to the client.
*/
public class FetchPhase implements SearchPhase {
@@ -78,33 +77,27 @@ public class FetchPhase implements SearchPhase {
}
@Override
- public Map<String, ? extends SearchParseElement> parseElements() {
- Map<String, SearchParseElement> parseElements = new HashMap<>();
- for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
- parseElements.putAll(fetchSubPhase.parseElements());
- }
- return unmodifiableMap(parseElements);
- }
-
- @Override
public void preProcess(SearchContext context) {
}
@Override
public void execute(SearchContext context) {
- FieldsVisitor fieldsVisitor;
+ final FieldsVisitor fieldsVisitor;
Set<String> fieldNames = null;
List<String> fieldNamePatterns = null;
- if (!context.hasFieldNames()) {
+ StoredFieldsContext storedFieldsContext = context.storedFieldsContext();
+
+ if (storedFieldsContext == null) {
// no fields specified, default to return source if no explicit indication
if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
context.fetchSourceContext(new FetchSourceContext(true));
}
fieldsVisitor = new FieldsVisitor(context.sourceRequested());
- } else if (context.fieldNames().isEmpty()) {
- fieldsVisitor = new FieldsVisitor(context.sourceRequested());
+ } else if (storedFieldsContext.fetchFields() == false) {
+ // disable stored fields entirely
+ fieldsVisitor = null;
} else {
- for (String fieldName : context.fieldNames()) {
+ for (String fieldName : context.storedFieldsContext().fieldNames()) {
if (fieldName.equals(SourceFieldMapper.NAME)) {
if (context.hasFetchSourceContext()) {
context.fetchSourceContext().fetchSource(true);
@@ -133,8 +126,13 @@ public class FetchPhase implements SearchPhase {
}
}
boolean loadSource = context.sourceRequested();
- fieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames,
- fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, loadSource);
+ if (fieldNames == null && fieldNamePatterns == null) {
+ // empty list specified, default to disable _source if no explicit indication
+ fieldsVisitor = new FieldsVisitor(loadSource);
+ } else {
+ fieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames,
+ fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, loadSource);
+ }
}
InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
@@ -182,6 +180,9 @@ public class FetchPhase implements SearchPhase {
}
private InternalSearchHit createSearchHit(SearchContext context, FieldsVisitor fieldsVisitor, int docId, int subDocId, LeafReaderContext subReaderContext) {
+ if (fieldsVisitor == null) {
+ return new InternalSearchHit(docId);
+ }
loadStoredFields(context, subReaderContext, fieldsVisitor, subDocId);
fieldsVisitor.postProcess(context.mapperService());
@@ -273,9 +274,9 @@ public class FetchPhase implements SearchPhase {
private Map<String, SearchHitField> getSearchFields(SearchContext context, int nestedSubDocId, Set<String> fieldNames, List<String> fieldNamePatterns, LeafReaderContext subReaderContext) {
Map<String, SearchHitField> searchFields = null;
- if (context.hasFieldNames() && !context.fieldNames().isEmpty()) {
+ if (context.hasStoredFields() && !context.storedFieldsContext().fieldNames().isEmpty()) {
FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames,
- fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, false);
+ fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, false);
if (nestedFieldsVisitor != null) {
loadStoredFields(context, subReaderContext, nestedFieldsVisitor, nestedSubDocId);
nestedFieldsVisitor.postProcess(context.mapperService());
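Note: FetchPhase now derives the FieldsVisitor from the StoredFieldsContext in three distinct cases instead of probing a raw field-name list. A condensed sketch of the dispatch implemented above:

    if (storedFieldsContext == null) {
        // nothing requested: default to fetching _source unless explicitly configured otherwise
    } else if (storedFieldsContext.fetchFields() == false) {
        // "_none_": skip stored-field loading entirely, fieldsVisitor stays null
        // and createSearchHit() short-circuits to new InternalSearchHit(docId)
    } else {
        // explicit names and/or patterns: build a (Custom)FieldsVisitor for exactly those fields
    }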
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
index ed8c0358db..eac878569e 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
@@ -21,14 +21,13 @@ package org.elasticsearch.search.fetch;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.transport.TransportResponse;
import java.io.IOException;
-import static org.elasticsearch.search.internal.InternalSearchHits.StreamContext;
-
/**
*
*/
@@ -70,9 +69,17 @@ public class FetchSearchResult extends TransportResponse implements FetchSearchR
}
public void hits(InternalSearchHits hits) {
+ assert assertNoSearchTarget(hits);
this.hits = hits;
}
+ private boolean assertNoSearchTarget(InternalSearchHits hits) {
+ for (SearchHit hit : hits.hits()) {
+ assert hit.getShard() == null : "expected null but got: " + hit.getShard();
+ }
+ return true;
+ }
+
public InternalSearchHits hits() {
return hits;
}
@@ -96,13 +103,13 @@ public class FetchSearchResult extends TransportResponse implements FetchSearchR
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
id = in.readLong();
- hits = InternalSearchHits.readSearchHits(in, InternalSearchHits.streamContext().streamShardTarget(StreamContext.ShardTargetType.NO_STREAM));
+ hits = InternalSearchHits.readSearchHits(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(id);
- hits.writeTo(out, InternalSearchHits.streamContext().streamShardTarget(StreamContext.ShardTargetType.NO_STREAM));
+ hits.writeTo(out);
}
}
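Note: hits(...) guards against shard targets leaking into serialized fetch results using the boolean-returning assertion idiom, so the per-hit loop costs nothing when assertions are disabled:

    assert assertNoSearchTarget(hits);   // helper always returns true; the loop runs only under -ea

The readFrom/writeTo simplification drops the explicit StreamContext because hits at this stage never carry a shard target, which is exactly what the assertion enforces.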
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
index 8efb995926..1783652d12 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
@@ -22,16 +22,14 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
-import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.SearchContext;
-import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
- * Sub phase within the fetch phase used to fetch things *about* the documents like highlghting or matched queries.
+ * Sub phase within the fetch phase used to fetch things *about* the documents like highlighting or matched queries.
*/
public interface FetchSubPhase {
@@ -69,10 +67,6 @@ public interface FetchSubPhase {
return searcher.getIndexReader();
}
- public IndexSearcher topLevelSearcher() {
- return searcher;
- }
-
public Map<String, Object> cache() {
if (cache == null) {
cache = new HashMap<>();
@@ -82,10 +76,6 @@ public interface FetchSubPhase {
}
- default Map<String, ? extends SearchParseElement> parseElements() {
- return Collections.emptyMap();
- }
-
/**
* Executes the hit level phase, with a reader and doc id (note, its a low level reader, and the matching doc).
*/
@@ -93,23 +83,4 @@ public interface FetchSubPhase {
default void hitsExecute(SearchContext context, InternalSearchHit[] hits) {}
-
- /**
- * This interface is in the fetch phase plugin mechanism.
- * Whenever a new search is executed we create a new {@link SearchContext} that holds individual contexts for each {@link org.elasticsearch.search.fetch.FetchSubPhase}.
- * Fetch phases that use the plugin mechanism must provide a ContextFactory to the SearchContext that creates the fetch phase context and also associates them with a name.
- * See {@link SearchContext#getFetchSubPhaseContext(FetchSubPhase.ContextFactory)}
- */
- interface ContextFactory<SubPhaseContext extends FetchSubPhaseContext> {
-
- /**
- * The name of the context.
- */
- String getName();
-
- /**
- * Creates a new instance of a FetchSubPhaseContext that holds all information a FetchSubPhase needs to execute on hits.
- */
- SubPhaseContext newContextInstance();
- }
}
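Note: with parseElements() and the ContextFactory plugin mechanism removed, a fetch sub phase reduces to its hit callbacks. A hedged sketch of a custom sub phase under the trimmed interface; ExampleFetchSubPhase is hypothetical:

    public final class ExampleFetchSubPhase implements FetchSubPhase {
        @Override
        public void hitExecute(SearchContext context, HitContext hitContext) {
            // per-hit work goes here; configuration now lives on SearchContext itself
            // (e.g. context.docValueFieldsContext()) instead of a per-phase sub context
        }
    }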
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseContext.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseContext.java
deleted file mode 100644
index 856c0ad902..0000000000
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseContext.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.fetch;
-
-import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
-
-/**
- * All configuration and context needed by the FetchSubPhase to execute on hits.
- * The only required information in this base class is whether or not the sub phase needs to be run at all.
- * It can be extended by FetchSubPhases to hold information the phase needs to execute on hits.
- * See {@link org.elasticsearch.search.fetch.FetchSubPhase.ContextFactory} and also {@link DocValueFieldsContext} for an example.
- */
-public class FetchSubPhaseContext {
-
- // This is to store if the FetchSubPhase should be executed at all.
- private boolean hitExecutionNeeded = false;
-
- /**
- * Set if this phase should be executed at all.
- */
- public void setHitExecutionNeeded(boolean hitExecutionNeeded) {
- this.hitExecutionNeeded = hitExecutionNeeded;
- }
-
- /**
- * Returns if this phase be executed at all.
- */
- public boolean hitExecutionNeeded() {
- return hitExecutionNeeded;
- }
-
-}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseParseElement.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseParseElement.java
deleted file mode 100644
index 752aeecf04..0000000000
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchSubPhaseParseElement.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.fetch;
-
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.search.SearchParseElement;
-import org.elasticsearch.search.internal.SearchContext;
-
-/**
- * A parse element for a {@link org.elasticsearch.search.fetch.FetchSubPhase} that is used when parsing a search request.
- */
-public abstract class FetchSubPhaseParseElement<SubPhaseContext extends FetchSubPhaseContext> implements SearchParseElement {
-
- @Override
- public final void parse(XContentParser parser, SearchContext context) throws Exception {
- SubPhaseContext fetchSubPhaseContext = context.getFetchSubPhaseContext(getContextFactory());
- // this is to make sure that the SubFetchPhase knows it should execute
- fetchSubPhaseContext.setHitExecutionNeeded(true);
- innerParse(parser, fetchSubPhaseContext, context);
- }
-
- /**
- * Implement the actual parsing here.
- */
- protected abstract void innerParse(XContentParser parser, SubPhaseContext fetchSubPhaseContext, SearchContext searchContext) throws Exception;
-
- /**
- * Return the ContextFactory for this FetchSubPhase.
- */
- protected abstract FetchSubPhase.ContextFactory<SubPhaseContext> getContextFactory();
-}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java
new file mode 100644
index 0000000000..5d6c587e23
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.rest.RestRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Context used to fetch the {@code stored_fields}.
+ */
+public class StoredFieldsContext implements Writeable {
+ public static final String _NONE_ = "_none_";
+
+ private final List<String> fieldNames;
+ private boolean fetchFields;
+
+ private StoredFieldsContext(boolean fetchFields) {
+ this.fetchFields = fetchFields;
+ this.fieldNames = null;
+ }
+
+ private StoredFieldsContext(List<String> fieldNames) {
+ Objects.requireNonNull(fieldNames, "fieldNames must not be null");
+ this.fetchFields = true;
+ this.fieldNames = new ArrayList<>(fieldNames);
+ }
+
+ public StoredFieldsContext(StoredFieldsContext other) {
+ this.fetchFields = other.fetchFields();
+ if (other.fieldNames() != null) {
+ this.fieldNames = new ArrayList<>(other.fieldNames());
+ } else {
+ this.fieldNames = null;
+ }
+ }
+
+ public StoredFieldsContext(StreamInput in) throws IOException {
+ this.fetchFields = in.readBoolean();
+ if (fetchFields) {
+ this.fieldNames = (List<String>) in.readGenericValue();
+ } else {
+ this.fieldNames = null;
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeBoolean(fetchFields);
+ if (fetchFields) {
+ out.writeGenericValue(fieldNames);
+ }
+ }
+
+ /**
+ * Gets the field names to load and return as part of the search request.
+ */
+ public List<String> fieldNames() {
+ return fieldNames;
+ }
+
+ /**
+ * Adds the field names {@code fieldNames} to the list of fields to load.
+ */
+ public StoredFieldsContext addFieldNames(List<String> fieldNames) {
+ if (fetchFields == false || fieldNames.contains(_NONE_)) {
+ throw new IllegalArgumentException("cannot combine _none_ with other fields");
+ }
+ this.fieldNames.addAll(fieldNames);
+ return this;
+ }
+
+ /**
+ * Adds a field name {@code field} to the list of fields to load.
+ */
+ public StoredFieldsContext addFieldName(String field) {
+ if (fetchFields == false || _NONE_.equals(field)) {
+ throw new IllegalArgumentException("cannot combine _none_ with other fields");
+ }
+ this.fieldNames.add(field);
+ return this;
+ }
+
+ /**
+ * Returns true if the stored fields should be fetched, false otherwise.
+ */
+ public boolean fetchFields() {
+ return fetchFields;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ StoredFieldsContext that = (StoredFieldsContext) o;
+
+ if (fetchFields != that.fetchFields) return false;
+ return fieldNames != null ? fieldNames.equals(that.fieldNames) : that.fieldNames == null;
+
+ }
+
+ @Override
+ public int hashCode() {
+ int result = fieldNames != null ? fieldNames.hashCode() : 0;
+ result = 31 * result + (fetchFields ? 1 : 0);
+ return result;
+ }
+
+ public void toXContent(String preferredName, XContentBuilder builder) throws IOException {
+ if (fetchFields == false) {
+ builder.field(preferredName, _NONE_);
+ }
+ if (fieldNames != null) {
+ if (fieldNames.size() == 1) {
+ builder.field(preferredName, fieldNames.get(0));
+ } else {
+ builder.startArray(preferredName);
+ for (String fieldName : fieldNames) {
+ builder.value(fieldName);
+ }
+ builder.endArray();
+ }
+ }
+ }
+
+ public static StoredFieldsContext fromList(List<String> fieldNames) {
+ if (fieldNames.size() == 1 && _NONE_.equals(fieldNames.get(0))) {
+ return new StoredFieldsContext(false);
+ }
+ if (fieldNames.contains(_NONE_)) {
+ throw new IllegalArgumentException("cannot combine _none_ with other fields");
+ }
+ return new StoredFieldsContext(fieldNames);
+ }
+
+ public static StoredFieldsContext fromXContent(String fieldName, QueryParseContext context) throws IOException {
+ XContentParser parser = context.parser();
+ XContentParser.Token token = parser.currentToken();
+
+ if (token == XContentParser.Token.VALUE_STRING) {
+ return fromList(Collections.singletonList(parser.text()));
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ ArrayList<String> list = new ArrayList<>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ list.add(parser.text());
+ }
+ return fromList(list);
+ } else {
+ throw new ParsingException(parser.getTokenLocation(),
+ "Expected [" + XContentParser.Token.VALUE_STRING + "] or ["
+ + XContentParser.Token.START_ARRAY + "] in [" + fieldName + "] but found [" + token + "]");
+ }
+ }
+
+ public static StoredFieldsContext fromRestRequest(String name, RestRequest request) {
+ String sField = request.param(name);
+ if (sField != null) {
+ String[] sFields = Strings.splitStringByCommaToArray(sField);
+ return fromList(Arrays.asList(sFields));
+ }
+ return null;
+ }
+}
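
Note: the factory methods above give _none_ a special meaning: alone it disables stored-field loading, mixed with other names it is rejected. A minimal sketch of that contract, assuming only the StoredFieldsContext API introduced in this hunk:

    import java.util.Arrays;
    import java.util.Collections;
    import org.elasticsearch.search.fetch.StoredFieldsContext;

    public class StoredFieldsContextSketch {
        public static void main(String[] args) {
            // A plain field list: fields are fetched and the names are preserved.
            StoredFieldsContext fields = StoredFieldsContext.fromList(Arrays.asList("title", "date"));
            assert fields.fetchFields() && fields.fieldNames().contains("title");

            // "_none_" on its own turns stored-field fetching off entirely;
            // the resulting context carries no field names.
            StoredFieldsContext none = StoredFieldsContext.fromList(Collections.singletonList("_none_"));
            assert none.fetchFields() == false && none.fieldNames() == null;

            // Mixing "_none_" with real field names throws IllegalArgumentException,
            // as does adding names to a "_none_" context via addFieldName/addFieldNames.
        }
    }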
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java
index 54185734f9..325d28e459 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java
@@ -18,38 +18,23 @@
*/
package org.elasticsearch.search.fetch.subphase;
-import org.elasticsearch.search.fetch.FetchSubPhaseContext;
-
-import java.util.ArrayList;
import java.util.List;
/**
* All the required context to pull a field from the doc values.
*/
-public class DocValueFieldsContext extends FetchSubPhaseContext {
-
- public static class DocValueField {
- private final String name;
+public class DocValueFieldsContext {
- public DocValueField(String name) {
- this.name = name;
- }
-
- public String name() {
- return name;
- }
- }
-
- private List<DocValueField> fields = new ArrayList<>();
-
- public DocValueFieldsContext() {
- }
+ private final List<String> fields;
- public void add(DocValueField field) {
- this.fields.add(field);
+ public DocValueFieldsContext(List<String> fields) {
+ this.fields = fields;
}
- public List<DocValueField> fields() {
+ /**
+ * Returns the required docvalue fields.
+ */
+ public List<String> fields() {
return this.fields;
}
}
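
Note: with the FetchSubPhaseContext machinery removed, DocValueFieldsContext is just an immutable holder for the requested field names. A hedged usage fragment (the field names are illustrative):

    import java.util.Arrays;
    import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;

    DocValueFieldsContext ctx = new DocValueFieldsContext(Arrays.asList("price", "timestamp"));
    for (String field : ctx.fields()) {
        // the fetch sub-phase below resolves each name via the mapper service
        // and loads its values from doc values
    }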
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java
index 803cbb4348..befce94a9e 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java
@@ -36,35 +36,21 @@ import java.util.HashMap;
*/
public final class DocValueFieldsFetchSubPhase implements FetchSubPhase {
- public static final String NAME = "docvalue_fields";
- public static final ContextFactory<DocValueFieldsContext> CONTEXT_FACTORY = new ContextFactory<DocValueFieldsContext>() {
-
- @Override
- public String getName() {
- return NAME;
- }
-
- @Override
- public DocValueFieldsContext newContextInstance() {
- return new DocValueFieldsContext();
- }
- };
-
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
- if (context.getFetchSubPhaseContext(CONTEXT_FACTORY).hitExecutionNeeded() == false) {
+ if (context.docValueFieldsContext() == null) {
return;
}
- for (DocValueFieldsContext.DocValueField field : context.getFetchSubPhaseContext(CONTEXT_FACTORY).fields()) {
+ for (String field : context.docValueFieldsContext().fields()) {
if (hitContext.hit().fieldsOrNull() == null) {
hitContext.hit().fields(new HashMap<>(2));
}
- SearchHitField hitField = hitContext.hit().fields().get(field.name());
+ SearchHitField hitField = hitContext.hit().fields().get(field);
if (hitField == null) {
- hitField = new InternalSearchHitField(field.name(), new ArrayList<>(2));
- hitContext.hit().fields().put(field.name(), hitField);
+ hitField = new InternalSearchHitField(field, new ArrayList<>(2));
+ hitContext.hit().fields().put(field, hitField);
}
- MappedFieldType fieldType = context.mapperService().fullName(field.name());
+ MappedFieldType fieldType = context.mapperService().fullName(field);
if (fieldType != null) {
AtomicFieldData data = context.fieldData().getForField(fieldType).load(hitContext.readerContext());
ScriptDocValues values = data.getScriptValues();
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java
index 864de1628a..212f8d724d 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java
@@ -21,15 +21,15 @@ package org.elasticsearch.search.fetch.subphase;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.rest.RestRequest;
import java.io.IOException;
@@ -40,7 +40,7 @@ import java.util.List;
/**
* Context used to fetch the {@code _source}.
*/
-public class FetchSourceContext implements Streamable, ToXContent {
+public class FetchSourceContext implements Writeable, ToXContent {
public static final ParseField INCLUDES_FIELD = new ParseField("includes", "include");
public static final ParseField EXCLUDES_FIELD = new ParseField("excludes", "exclude");
@@ -51,9 +51,9 @@ public class FetchSourceContext implements Streamable, ToXContent {
private String[] includes;
private String[] excludes;
- public static FetchSourceContext parse(QueryParseContext context) throws IOException {
+ public static FetchSourceContext parse(XContentParser parser) throws IOException {
FetchSourceContext fetchSourceContext = new FetchSourceContext();
- fetchSourceContext.fromXContent(context);
+ fetchSourceContext.fromXContent(parser, ParseFieldMatcher.STRICT);
return fetchSourceContext;
}
@@ -88,6 +88,19 @@ public class FetchSourceContext implements Streamable, ToXContent {
this.excludes = excludes == null ? Strings.EMPTY_ARRAY : excludes;
}
+ public FetchSourceContext(StreamInput in) throws IOException {
+ fetchSource = in.readBoolean();
+ includes = in.readStringArray();
+ excludes = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeBoolean(fetchSource);
+ out.writeStringArray(includes);
+ out.writeStringArray(excludes);
+ }
+
public boolean fetchSource() {
return this.fetchSource;
}
@@ -148,8 +161,7 @@ public class FetchSourceContext implements Streamable, ToXContent {
return null;
}
- public void fromXContent(QueryParseContext context) throws IOException {
- XContentParser parser = context.parser();
+ public void fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
XContentParser.Token token = parser.currentToken();
boolean fetchSource = true;
String[] includes = Strings.EMPTY_ARRAY;
@@ -170,7 +182,7 @@ public class FetchSourceContext implements Streamable, ToXContent {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
- if (context.getParseFieldMatcher().match(currentFieldName, INCLUDES_FIELD)) {
+ if (parseFieldMatcher.match(currentFieldName, INCLUDES_FIELD)) {
List<String> includesList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
@@ -181,7 +193,7 @@ public class FetchSourceContext implements Streamable, ToXContent {
}
}
includes = includesList.toArray(new String[includesList.size()]);
- } else if (context.getParseFieldMatcher().match(currentFieldName, EXCLUDES_FIELD)) {
+ } else if (parseFieldMatcher.match(currentFieldName, EXCLUDES_FIELD)) {
List<String> excludesList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
@@ -197,10 +209,13 @@ public class FetchSourceContext implements Streamable, ToXContent {
+ " in [" + currentFieldName + "].", parser.getTokenLocation());
}
} else if (token == XContentParser.Token.VALUE_STRING) {
- if (context.getParseFieldMatcher().match(currentFieldName, INCLUDES_FIELD)) {
+ if (parseFieldMatcher.match(currentFieldName, INCLUDES_FIELD)) {
includes = new String[] {parser.text()};
- } else if (context.getParseFieldMatcher().match(currentFieldName, EXCLUDES_FIELD)) {
+ } else if (parseFieldMatcher.match(currentFieldName, EXCLUDES_FIELD)) {
excludes = new String[] {parser.text()};
+ } else {
+ throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token
+ + " in [" + currentFieldName + "].", parser.getTokenLocation());
}
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
@@ -230,22 +245,6 @@ public class FetchSourceContext implements Streamable, ToXContent {
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- fetchSource = in.readBoolean();
- includes = in.readStringArray();
- excludes = in.readStringArray();
- in.readBoolean(); // Used to be transformSource but that was dropped in 2.1
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeBoolean(fetchSource);
- out.writeStringArray(includes);
- out.writeStringArray(excludes);
- out.writeBoolean(false); // Used to be transformSource but that was dropped in 2.1
- }
-
- @Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
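
Note: moving from Streamable to Writeable replaces the mutable readFrom with a StreamInput constructor, keeping the read and write sides symmetric; the legacy transformSource boolean is no longer written, which changes the wire format. A round-trip sketch, assuming the 5.x BytesStreamOutput/BytesReference API:

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

    FetchSourceContext ctx = new FetchSourceContext(true,
            new String[]{"user.*"}, new String[]{"user.secret"});
    BytesStreamOutput out = new BytesStreamOutput();
    ctx.writeTo(out);                                     // boolean flag, includes, excludes
    StreamInput in = out.bytes().streamInput();
    FetchSourceContext copy = new FetchSourceContext(in); // same three reads, in order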
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java
index 7ba24442a7..fe5a9f286c 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java
@@ -35,19 +35,24 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
if (context.sourceRequested() == false) {
return;
}
+ SourceLookup source = context.lookup().source();
FetchSourceContext fetchSourceContext = context.fetchSourceContext();
assert fetchSourceContext.fetchSource();
if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) {
- hitContext.hit().sourceRef(context.lookup().source().internalSourceRef());
+ hitContext.hit().sourceRef(source.internalSourceRef());
return;
}
- SourceLookup source = context.lookup().source();
+ if (source.internalSourceRef() == null) {
+ throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " +
+ "for index [" + context.indexShard().shardId().getIndexName() + "]");
+ }
+
Object value = source.filter(fetchSourceContext.includes(), fetchSourceContext.excludes());
try {
final int initialCapacity = Math.min(1024, source.internalSourceRef().length());
BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
- XContentBuilder builder = new XContentBuilder(context.lookup().source().sourceContentType().xContent(), streamOutput);
+ XContentBuilder builder = new XContentBuilder(source.sourceContentType().xContent(), streamOutput);
builder.value(value);
hitContext.hit().sourceRef(builder.bytes());
} catch (IOException e) {
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java
index cbcab09976..44a6b13fd4 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java
@@ -80,7 +80,7 @@ public final class InnerHitsContext {
public void addInnerHitDefinition(BaseInnerHits innerHit) {
if (innerHits.containsKey(innerHit.getName())) {
throw new IllegalArgumentException("inner_hit definition with the name [" + innerHit.getName() +
- "] already exists. Use a different inner_hit name");
+ "] already exists. Use a different inner_hit name or define one explicitly");
}
innerHits.put(innerHit.getName(), innerHit);
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java
index 47f78c6ce5..ccfbf3515f 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java
@@ -38,6 +38,9 @@ public final class ParentFieldSubFetchPhase implements FetchSubPhase {
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
+ if (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false) {
+ return;
+ }
ParentFieldMapper parentFieldMapper = context.mapperService().documentMapper(hitContext.hit().type()).parentFieldMapper();
if (parentFieldMapper.active() == false) {
return;
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/VersionFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/VersionFetchSubPhase.java
index 884cf6d2bb..1ce102e364 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/VersionFetchSubPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/VersionFetchSubPhase.java
@@ -31,7 +31,8 @@ public final class VersionFetchSubPhase implements FetchSubPhase {
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
- if (context.version() == false) {
+ if (context.version() == false ||
+ (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false)) {
return;
}
long version = Versions.NOT_FOUND;
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java
index b62d28f8ab..5ff1df9c66 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java
@@ -78,10 +78,7 @@ public final class CustomQueryScorer extends QueryScorer {
@Override
protected void extractUnknownQuery(Query query,
Map<String, WeightedSpanTerm> terms) throws IOException {
- if (query instanceof FunctionScoreQuery) {
- query = ((FunctionScoreQuery) query).getSubQuery();
- extract(query, 1F, terms);
- } else if (query instanceof FiltersFunctionScoreQuery) {
+ if (query instanceof FiltersFunctionScoreQuery) {
query = ((FiltersFunctionScoreQuery) query).getSubQuery();
extract(query, 1F, terms);
} else if (terms.isEmpty()) {
@@ -90,16 +87,14 @@ public final class CustomQueryScorer extends QueryScorer {
}
protected void extract(Query query, float boost, Map<String, WeightedSpanTerm> terms) throws IOException {
- if (query instanceof GeoPointInBBoxQuery) {
- // skip all geo queries, see https://issues.apache.org/jira/browse/LUCENE-7293 and
- // https://github.com/elastic/elasticsearch/issues/17537
- return;
- } else if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
+ if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
// skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999
return;
+ } else if (query instanceof FunctionScoreQuery) {
+ super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms);
+ } else {
+ super.extract(query, boost, terms);
}
-
- super.extract(query, boost, terms);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
index fe4587826c..a063b2900d 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
@@ -476,7 +476,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
builder.field(FRAGMENT_OFFSET_FIELD.getPreferredName(), fragmentOffset);
}
if (matchedFields != null) {
- builder.field(MATCHED_FIELDS_FIELD.getPreferredName(), matchedFields);
+ builder.array(MATCHED_FIELDS_FIELD.getPreferredName(), matchedFields);
}
builder.endObject();
}
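
Note: the matched_fields values are now written via array(...) rather than field(...), so a String[] is always rendered as a JSON array. A small sketch of the resulting output, assuming the 5.x XContentBuilder API (field name and values are illustrative):

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    XContentBuilder b = XContentFactory.jsonBuilder();
    b.startObject();
    // emits {"matched_fields":["content","content.plain"]}
    b.array("matched_fields", "content", "content.plain");
    b.endObject();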
diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
index 9e132e4013..5084f9ecad 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
@@ -27,7 +27,6 @@ import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -38,13 +37,13 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchExtBuilder;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSearchResult;
-import org.elasticsearch.search.fetch.FetchSubPhase;
-import org.elasticsearch.search.fetch.FetchSubPhaseContext;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
@@ -70,13 +69,38 @@ public abstract class FilteredSearchContext extends SearchContext {
}
@Override
+ public boolean hasStoredFields() {
+ return in.hasStoredFields();
+ }
+
+ @Override
+ public boolean hasStoredFieldsContext() {
+ return in.hasStoredFieldsContext();
+ }
+
+ @Override
+ public boolean storedFieldsRequested() {
+ return in.storedFieldsRequested();
+ }
+
+ @Override
+ public StoredFieldsContext storedFieldsContext() {
+ return in.storedFieldsContext();
+ }
+
+ @Override
+ public SearchContext storedFieldsContext(StoredFieldsContext storedFieldsContext) {
+ return in.storedFieldsContext(storedFieldsContext);
+ }
+
+ @Override
protected void doClose() {
in.doClose();
}
@Override
- public void preProcess() {
- in.preProcess();
+ public void preProcess(boolean rewrite) {
+ in.preProcess(rewrite);
}
@Override
@@ -235,11 +259,6 @@ public abstract class FilteredSearchContext extends SearchContext {
}
@Override
- public AnalysisService analysisService() {
- return in.analysisService();
- }
-
- @Override
public SimilarityService similarityService() {
return in.similarityService();
}
@@ -374,20 +393,6 @@ public abstract class FilteredSearchContext extends SearchContext {
return in.size(size);
}
- @Override
- public boolean hasFieldNames() {
- return in.hasFieldNames();
- }
-
- @Override
- public List<String> fieldNames() {
- return in.fieldNames();
- }
-
- @Override
- public void emptyFieldNames() {
- in.emptyFieldNames();
- }
@Override
public boolean explain() {
@@ -500,8 +505,13 @@ public abstract class FilteredSearchContext extends SearchContext {
}
@Override
- public <SubPhaseContext extends FetchSubPhaseContext> SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory<SubPhaseContext> contextFactory) {
- return in.getFetchSubPhaseContext(contextFactory);
+ public void addSearchExt(SearchExtBuilder searchExtBuilder) {
+ in.addSearchExt(searchExtBuilder);
+ }
+
+ @Override
+ public SearchExtBuilder getSearchExt(String name) {
+ return in.getSearchExt(name);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
index 9f5054dccd..227fe90ee6 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
@@ -39,7 +39,6 @@ import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
-import org.elasticsearch.search.internal.InternalSearchHits.StreamContext.ShardTargetType;
import org.elasticsearch.search.lookup.SourceLookup;
import java.io.IOException;
@@ -100,9 +99,17 @@ public class InternalSearchHit implements SearchHit {
}
+ public InternalSearchHit(int docId) {
+ this(docId, null, null, null);
+ }
+
public InternalSearchHit(int docId, String id, Text type, Map<String, SearchHitField> fields) {
this.docId = docId;
- this.id = new Text(id);
+ if (id != null) {
+ this.id = new Text(id);
+ } else {
+ this.id = null;
+ }
this.type = type;
this.fields = fields;
}
@@ -168,7 +175,7 @@ public class InternalSearchHit implements SearchHit {
@Override
public String id() {
- return id.string();
+ return id != null ? id.string() : null;
}
@Override
@@ -178,7 +185,7 @@ public class InternalSearchHit implements SearchHit {
@Override
public String type() {
- return type.string();
+ return type != null ? type.string() : null;
}
@Override
@@ -444,8 +451,12 @@ public class InternalSearchHit implements SearchHit {
if (shard != null) {
builder.field(Fields._INDEX, shard.indexText());
}
- builder.field(Fields._TYPE, type);
- builder.field(Fields._ID, id);
+ if (type != null) {
+ builder.field(Fields._TYPE, type);
+ }
+ if (id != null) {
+ builder.field(Fields._ID, id);
+ }
}
if (version != -1) {
builder.field(Fields._VERSION, version);
@@ -542,21 +553,17 @@ public class InternalSearchHit implements SearchHit {
return builder;
}
- public static InternalSearchHit readSearchHit(StreamInput in, InternalSearchHits.StreamContext context) throws IOException {
+ public static InternalSearchHit readSearchHit(StreamInput in) throws IOException {
InternalSearchHit hit = new InternalSearchHit();
- hit.readFrom(in, context);
+ hit.readFrom(in);
return hit;
}
@Override
public void readFrom(StreamInput in) throws IOException {
- readFrom(in, InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
- }
-
- public void readFrom(StreamInput in, InternalSearchHits.StreamContext context) throws IOException {
score = in.readFloat();
- id = in.readText();
- type = in.readText();
+ id = in.readOptionalText();
+ type = in.readOptionalText();
nestedIdentity = in.readOptionalStreamable(InternalNestedIdentity::new);
version = in.readLong();
source = in.readBytesReference();
@@ -632,26 +639,13 @@ public class InternalSearchHit implements SearchHit {
matchedQueries[i] = in.readString();
}
}
-
- if (context.streamShardTarget() == ShardTargetType.STREAM) {
- if (in.readBoolean()) {
- shard = new SearchShardTarget(in);
- }
- } else if (context.streamShardTarget() == ShardTargetType.LOOKUP) {
- int lookupId = in.readVInt();
- if (lookupId > 0) {
- shard = context.handleShardLookup().get(lookupId);
- }
- }
-
+ shard = in.readOptionalWriteable(SearchShardTarget::new);
size = in.readVInt();
if (size > 0) {
innerHits = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String key = in.readString();
- ShardTargetType shardTarget = context.streamShardTarget();
- InternalSearchHits value = InternalSearchHits.readSearchHits(in, context.streamShardTarget(ShardTargetType.NO_STREAM));
- context.streamShardTarget(shardTarget);
+ InternalSearchHits value = InternalSearchHits.readSearchHits(in);
innerHits.put(key, value);
}
}
@@ -659,13 +653,9 @@ public class InternalSearchHit implements SearchHit {
@Override
public void writeTo(StreamOutput out) throws IOException {
- writeTo(out, InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
- }
-
- public void writeTo(StreamOutput out, InternalSearchHits.StreamContext context) throws IOException {
out.writeFloat(score);
- out.writeText(id);
- out.writeText(type);
+ out.writeOptionalText(id);
+ out.writeOptionalText(type);
out.writeOptionalStreamable(nestedIdentity);
out.writeLong(version);
out.writeBytesReference(source);
@@ -740,31 +730,14 @@ public class InternalSearchHit implements SearchHit {
out.writeString(matchedFilter);
}
}
-
- if (context.streamShardTarget() == ShardTargetType.STREAM) {
- if (shard == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- shard.writeTo(out);
- }
- } else if (context.streamShardTarget() == ShardTargetType.LOOKUP) {
- if (shard == null) {
- out.writeVInt(0);
- } else {
- out.writeVInt(context.shardHandleLookup().get(shard));
- }
- }
-
+ out.writeOptionalWriteable(shard);
if (innerHits == null) {
out.writeVInt(0);
} else {
out.writeVInt(innerHits.size());
for (Map.Entry<String, InternalSearchHits> entry : innerHits.entrySet()) {
out.writeString(entry.getKey());
- ShardTargetType shardTarget = context.streamShardTarget();
- entry.getValue().writeTo(out, context.streamShardTarget(ShardTargetType.NO_STREAM));
- context.streamShardTarget(shardTarget);
+ entry.getValue().writeTo(out);
}
}
}
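
Note: since id and type may now be null (hits built with only a doc id), serialization moves to the optional text variants, which prefix the value with a presence byte. A sketch of the symmetry, using the StreamOutput/StreamInput calls from this hunk:

    // write side: one boolean flag, then the value only if present
    out.writeOptionalText(id);      // id may be null now
    out.writeOptionalText(type);
    // read side mirrors it exactly
    id = in.readOptionalText();     // null when the flag was false
    type = in.readOptionalText();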
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java
index 592d4b0751..9b82c8783a 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.internal;
-import com.carrotsearch.hppc.IntObjectHashMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -29,65 +28,12 @@ import org.elasticsearch.search.SearchShardTarget;
import java.io.IOException;
import java.util.Arrays;
-import java.util.IdentityHashMap;
import java.util.Iterator;
-import java.util.Map;
import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit;
-/**
- *
- */
public class InternalSearchHits implements SearchHits {
- public static class StreamContext {
-
- public static enum ShardTargetType {
- STREAM,
- LOOKUP,
- NO_STREAM
- }
-
- private IdentityHashMap<SearchShardTarget, Integer> shardHandleLookup = new IdentityHashMap<>();
- private IntObjectHashMap<SearchShardTarget> handleShardLookup = new IntObjectHashMap<>();
- private ShardTargetType streamShardTarget = ShardTargetType.STREAM;
-
- public StreamContext reset() {
- shardHandleLookup.clear();
- handleShardLookup.clear();
- streamShardTarget = ShardTargetType.STREAM;
- return this;
- }
-
- public IdentityHashMap<SearchShardTarget, Integer> shardHandleLookup() {
- return shardHandleLookup;
- }
-
- public IntObjectHashMap<SearchShardTarget> handleShardLookup() {
- return handleShardLookup;
- }
-
- public ShardTargetType streamShardTarget() {
- return streamShardTarget;
- }
-
- public StreamContext streamShardTarget(ShardTargetType streamShardTarget) {
- this.streamShardTarget = streamShardTarget;
- return this;
- }
- }
-
- private static final ThreadLocal<StreamContext> cache = new ThreadLocal<StreamContext>() {
- @Override
- protected StreamContext initialValue() {
- return new StreamContext();
- }
- };
-
- public static StreamContext streamContext() {
- return cache.get().reset();
- }
-
public static InternalSearchHits empty() {
// We shouldn't use static final instance, since that could directly be returned by native transport clients
return new InternalSearchHits(EMPTY, 0, 0);
@@ -186,11 +132,6 @@ public class InternalSearchHits implements SearchHits {
return builder;
}
- public static InternalSearchHits readSearchHits(StreamInput in, StreamContext context) throws IOException {
- InternalSearchHits hits = new InternalSearchHits();
- hits.readFrom(in, context);
- return hits;
- }
public static InternalSearchHits readSearchHits(StreamInput in) throws IOException {
InternalSearchHits hits = new InternalSearchHits();
@@ -200,63 +141,27 @@ public class InternalSearchHits implements SearchHits {
@Override
public void readFrom(StreamInput in) throws IOException {
- readFrom(in, streamContext().streamShardTarget(StreamContext.ShardTargetType.LOOKUP));
- }
-
- public void readFrom(StreamInput in, StreamContext context) throws IOException {
totalHits = in.readVLong();
maxScore = in.readFloat();
int size = in.readVInt();
if (size == 0) {
hits = EMPTY;
} else {
- if (context.streamShardTarget() == StreamContext.ShardTargetType.LOOKUP) {
- // read the lookup table first
- int lookupSize = in.readVInt();
- for (int i = 0; i < lookupSize; i++) {
- context.handleShardLookup().put(in.readVInt(), new SearchShardTarget(in));
- }
- }
-
hits = new InternalSearchHit[size];
for (int i = 0; i < hits.length; i++) {
- hits[i] = readSearchHit(in, context);
+ hits[i] = readSearchHit(in);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
- writeTo(out, streamContext().streamShardTarget(StreamContext.ShardTargetType.LOOKUP));
- }
-
- public void writeTo(StreamOutput out, StreamContext context) throws IOException {
out.writeVLong(totalHits);
out.writeFloat(maxScore);
out.writeVInt(hits.length);
if (hits.length > 0) {
- if (context.streamShardTarget() == StreamContext.ShardTargetType.LOOKUP) {
- // start from 1, 0 is for null!
- int counter = 1;
- for (InternalSearchHit hit : hits) {
- if (hit.shard() != null) {
- Integer handle = context.shardHandleLookup().get(hit.shard());
- if (handle == null) {
- context.shardHandleLookup().put(hit.shard(), counter++);
- }
- }
- }
- out.writeVInt(context.shardHandleLookup().size());
- if (!context.shardHandleLookup().isEmpty()) {
- for (Map.Entry<SearchShardTarget, Integer> entry : context.shardHandleLookup().entrySet()) {
- out.writeVInt(entry.getValue());
- entry.getKey().writeTo(out);
- }
- }
- }
-
for (InternalSearchHit hit : hits) {
- hit.writeTo(out, context);
+ hit.writeTo(out);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
index 0c257191c2..63a1995b08 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
@@ -30,8 +30,9 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
+import org.elasticsearch.common.util.concurrent.RefCounted;
import org.elasticsearch.common.util.iterable.Iterables;
-import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -42,13 +43,14 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchExtBuilder;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSearchResult;
-import org.elasticsearch.search.fetch.FetchSubPhase;
-import org.elasticsearch.search.fetch.FetchSubPhaseContext;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
+import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
@@ -64,10 +66,20 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;
-public abstract class SearchContext implements Releasable {
+/**
+ * This class encapsulates the state needed to execute a search. It holds a reference to the
+ * shards point in time snapshot (IndexReader / ContextIndexSearcher) and allows passing on
+ * state from one query / fetch phase to another.
+ *
+ * This class also implements {@link RefCounted} since in some situations, like in {@link org.elasticsearch.search.SearchService},
+ * a SearchContext can be closed concurrently due to independent events, e.g. when an index gets removed. To prevent access to closed
+ * IndexReader / IndexSearcher instances, the SearchContext is guarded by a reference count and fails if it has been closed by
+ * an external event.
+ */
+// For the reasoning behind using RefCounted here, see #20095
+public abstract class SearchContext extends AbstractRefCounted implements Releasable {
private static ThreadLocal<SearchContext> current = new ThreadLocal<>();
public static final int DEFAULT_TERMINATE_AFTER = 0;
@@ -91,6 +103,7 @@ public abstract class SearchContext implements Releasable {
protected final ParseFieldMatcher parseFieldMatcher;
protected SearchContext(ParseFieldMatcher parseFieldMatcher) {
+ super("search_context");
this.parseFieldMatcher = parseFieldMatcher;
}
@@ -100,23 +113,34 @@ public abstract class SearchContext implements Releasable {
@Override
public final void close() {
- if (closed.compareAndSet(false, true)) { // prevent double release
- try {
- clearReleasables(Lifetime.CONTEXT);
- } finally {
- doClose();
- }
+ if (closed.compareAndSet(false, true)) { // prevent double closing
+ decRef();
}
}
private boolean nowInMillisUsed;
+ @Override
+ protected final void closeInternal() {
+ try {
+ clearReleasables(Lifetime.CONTEXT);
+ } finally {
+ doClose();
+ }
+ }
+
+ @Override
+ protected void alreadyClosed() {
+ throw new IllegalStateException("search context is already closed can't increment refCount current count [" + refCount() + "]");
+ }
+
protected abstract void doClose();
/**
* Should be called before executing the main query and after all other parameters have been set.
+ * @param rewrite if the set query should be rewritten against the searcher returned from {@link #searcher()}
*/
- public abstract void preProcess();
+ public abstract void preProcess(boolean rewrite);
public abstract Query searchFilter(String[] types);
@@ -161,7 +185,9 @@ public abstract class SearchContext implements Releasable {
public abstract SearchContext aggregations(SearchContextAggregations aggregations);
- public abstract <SubPhaseContext extends FetchSubPhaseContext> SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory<SubPhaseContext> contextFactory);
+ public abstract void addSearchExt(SearchExtBuilder searchExtBuilder);
+
+ public abstract SearchExtBuilder getSearchExt(String name);
public abstract SearchContextHighlight highlight();
@@ -200,14 +226,16 @@ public abstract class SearchContext implements Releasable {
public abstract SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext);
+ public abstract DocValueFieldsContext docValueFieldsContext();
+
+ public abstract SearchContext docValueFieldsContext(DocValueFieldsContext docValueFieldsContext);
+
public abstract ContextIndexSearcher searcher();
public abstract IndexShard indexShard();
public abstract MapperService mapperService();
- public abstract AnalysisService analysisService();
-
public abstract SimilarityService similarityService();
public abstract ScriptService scriptService();
@@ -265,11 +293,18 @@ public abstract class SearchContext implements Releasable {
public abstract SearchContext size(int size);
- public abstract boolean hasFieldNames();
+ public abstract boolean hasStoredFields();
+
+ public abstract boolean hasStoredFieldsContext();
+
+ /**
+ * A shortcut to check whether a storedFieldsContext exists and indicates that stored fields are requested.
+ */
+ public abstract boolean storedFieldsRequested();
- public abstract List<String> fieldNames();
+ public abstract StoredFieldsContext storedFieldsContext();
- public abstract void emptyFieldNames();
+ public abstract SearchContext storedFieldsContext(StoredFieldsContext storedFieldsContext);
public abstract boolean explain();
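
Note: the ref-counting contract above comes from AbstractRefCounted: the owner starts with one reference, concurrent users take and release references, and closeInternal runs exactly once when the count hits zero. A minimal sketch of the pattern, with a made-up resource name:

    import org.elasticsearch.common.util.concurrent.AbstractRefCounted;

    class PointInTimeResource extends AbstractRefCounted {
        PointInTimeResource() {
            super("point_in_time"); // name used in error messages
        }
        @Override
        protected void closeInternal() {
            // runs exactly once, when the last reference is released;
            // this is where SearchContext clears releasables and calls doClose()
        }
    }

    // PointInTimeResource r = new PointInTimeResource(); // created with refCount == 1
    // r.incRef();   // a concurrent user takes a reference; throws if already closed
    // r.decRef();   // the user is done
    // r.decRef();   // owner's close(): count hits zero -> closeInternal()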
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
index d025d573c1..0d6148011e 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
@@ -81,14 +81,11 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
this.nowInMillis = nowInMillis;
}
- public ShardSearchLocalRequest(String[] types, long nowInMillis) {
+ public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) {
this.types = types;
this.nowInMillis = nowInMillis;
- }
-
- public ShardSearchLocalRequest(String[] types, long nowInMillis, String[] filteringAliases) {
- this(types, nowInMillis);
this.filteringAliases = filteringAliases;
+ this.shardId = shardId;
}
public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types,
diff --git a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java
index 2116300c19..2eb2d34dd2 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java
@@ -22,8 +22,10 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.util.Counter;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight;
@@ -33,12 +35,8 @@ import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
-/**
- */
public class SubSearchContext extends FilteredSearchContext {
// By default return 3 hits per bucket. A higher default would make the response really large by default, since
@@ -58,9 +56,10 @@ public class SubSearchContext extends FilteredSearchContext {
private int docsIdsToLoadFrom;
private int docsIdsToLoadSize;
- private List<String> fieldNames;
+ private StoredFieldsContext storedFields;
private ScriptFieldsContext scriptFields;
private FetchSourceContext fetchSourceContext;
+ private DocValueFieldsContext docValueFieldsContext;
private SearchContextHighlight highlight;
private boolean explain;
@@ -78,7 +77,7 @@ public class SubSearchContext extends FilteredSearchContext {
}
@Override
- public void preProcess() {
+ public void preProcess(boolean rewrite) {
}
@Override
@@ -156,6 +155,17 @@ public class SubSearchContext extends FilteredSearchContext {
}
@Override
+ public DocValueFieldsContext docValueFieldsContext() {
+ return docValueFieldsContext;
+ }
+
+ @Override
+ public SearchContext docValueFieldsContext(DocValueFieldsContext docValueFieldsContext) {
+ this.docValueFieldsContext = docValueFieldsContext;
+ return this;
+ }
+
+ @Override
public void timeout(TimeValue timeout) {
throw new UnsupportedOperationException("Not supported");
}
@@ -239,21 +249,29 @@ public class SubSearchContext extends FilteredSearchContext {
}
@Override
- public boolean hasFieldNames() {
- return fieldNames != null;
+ public boolean hasStoredFields() {
+ return storedFields != null && storedFields.fieldNames() != null;
}
@Override
- public List<String> fieldNames() {
- if (fieldNames == null) {
- fieldNames = new ArrayList<>();
- }
- return fieldNames;
+ public boolean hasStoredFieldsContext() {
+ return storedFields != null;
+ }
+
+ @Override
+ public boolean storedFieldsRequested() {
+ return storedFields != null && storedFields.fetchFields();
}
@Override
- public void emptyFieldNames() {
- this.fieldNames = Collections.emptyList();
+ public StoredFieldsContext storedFieldsContext() {
+ return storedFields;
+ }
+
+ @Override
+ public SearchContext storedFieldsContext(StoredFieldsContext storedFieldsContext) {
+ this.storedFields = storedFieldsContext;
+ return this;
}
@Override
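
Note: the three stored-fields accessors encode three distinct states. A small truth-table sketch, assuming the StoredFieldsContext factory from earlier in this diff:

    import java.util.Arrays;
    import java.util.Collections;
    import org.elasticsearch.search.fetch.StoredFieldsContext;

    // no context at all: nothing was requested, defaults apply
    // storedFields == null -> hasStoredFieldsContext() == false, storedFieldsRequested() == false

    // explicit "_none_": a context exists but fetching is disabled
    StoredFieldsContext none = StoredFieldsContext.fromList(Collections.singletonList("_none_"));
    // -> hasStoredFieldsContext() == true, storedFieldsRequested() == false, hasStoredFields() == false

    // a real field list: fetching is enabled and names are present
    StoredFieldsContext some = StoredFieldsContext.fromList(Arrays.asList("title"));
    // -> hasStoredFieldsContext() == true, storedFieldsRequested() == true, hasStoredFields() == true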
diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java
index 75c90ded70..addf910bc5 100644
--- a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java
@@ -19,6 +19,7 @@
package org.elasticsearch.search.profile.aggregation;
+import org.apache.lucene.search.Scorer;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
@@ -40,4 +41,9 @@ public class ProfilingLeafBucketCollector extends LeafBucketCollector {
profileBreakdown.stopAndRecordTime();
}
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ delegate.setScorer(scorer);
+ }
+
}
diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
index 189fead781..d1e90b2e9a 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -68,7 +68,8 @@ import java.util.List;
import java.util.concurrent.Callable;
/**
- *
+ * Query phase of a search request, used to run the query and get back from each shard information about the matching documents
+ * (document ids and score or sort criteria) so that matches can be reduced on the coordinating node.
*/
public class QueryPhase implements SearchPhase {
@@ -84,7 +85,7 @@ public class QueryPhase implements SearchPhase {
@Override
public void preProcess(SearchContext context) {
- context.preProcess();
+ context.preProcess(true);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
index e583cfbf13..92afb067a5 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -21,7 +21,6 @@ package org.elasticsearch.search.query;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.TopDocs;
-import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -42,9 +41,6 @@ import static java.util.Collections.emptyList;
import static org.elasticsearch.common.lucene.Lucene.readTopDocs;
import static org.elasticsearch.common.lucene.Lucene.writeTopDocs;
-/**
- *
- */
public class QuerySearchResult extends QuerySearchResultProvider {
private long id;
@@ -209,7 +205,6 @@ public class QuerySearchResult extends QuerySearchResultProvider {
public void readFromWithId(long id, StreamInput in) throws IOException {
this.id = id;
-// shardTarget = readSearchShardTarget(in);
from = in.readVInt();
size = in.readVInt();
int numSortFieldsPlus1 = in.readVInt();
@@ -232,10 +227,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
searchTimedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
-
- if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
- profileShardResults = new ProfileShardResult(in);
- }
+ profileShardResults = in.readOptionalWriteable(ProfileShardResult::new);
}
@Override
@@ -246,7 +238,6 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
public void writeToNoId(StreamOutput out) throws IOException {
-// shardTarget.writeTo(out);
out.writeVInt(from);
out.writeVInt(size);
if (sortValueFormats == null) {
@@ -273,14 +264,6 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
out.writeBoolean(searchTimedOut);
out.writeOptionalBoolean(terminatedEarly);
-
- if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
- if (profileShardResults == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- profileShardResults.writeTo(out);
- }
- }
+ out.writeOptionalWriteable(profileShardResults);
}
}
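
Note: the profiling results move to the writeOptionalWriteable/readOptionalWriteable pair, which always emits a presence byte, so the explicit 2.2.0 version gate can be dropped. A sketch of the symmetric calls from this hunk:

    // write side: false, or true followed by the serialized object
    out.writeOptionalWriteable(profileShardResults);
    // read side: reconstructs via a Writeable.Reader (here a constructor reference),
    // or returns null when the presence byte was false
    profileShardResults = in.readOptionalWriteable(ProfileShardResult::new);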
diff --git a/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
index 395db4cdcd..d3d4c75cd7 100644
--- a/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
+++ b/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
@@ -29,6 +29,7 @@ import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
/**
+ * Rescore phase of a search request, used to run potentially expensive scoring models against the top matching documents.
*/
public class RescorePhase extends AbstractComponent implements SearchPhase {
diff --git a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java
index 6ed4b0db5b..dce83bea7e 100644
--- a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java
@@ -202,7 +202,7 @@ public class SearchAfterBuilder implements ToXContent, Writeable {
}
void innerToXContent(XContentBuilder builder) throws IOException {
- builder.field(SEARCH_AFTER.getPreferredName(), sortValues);
+ builder.array(SEARCH_AFTER.getPreferredName(), sortValues);
}
public static SearchAfterBuilder fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
index f33dd0e2b1..bcecc2ca49 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
@@ -19,6 +19,7 @@
package org.elasticsearch.search.sort;
+import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
@@ -46,6 +47,7 @@ import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;
import org.elasticsearch.index.fielddata.NumericDoubleValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
+import org.elasticsearch.index.fielddata.plain.AbstractLatLonPointDVIndexFieldData.LatLonPointDVIndexFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.GeoValidationMethod;
import org.elasticsearch.index.query.QueryBuilder;
@@ -243,7 +245,7 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
}
/**
- * The distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS}
+ * The distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#METERS}
*/
public GeoDistanceSortBuilder unit(DistanceUnit unit) {
this.unit = unit;
@@ -251,7 +253,7 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
}
/**
- * Returns the distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS}
+ * Returns the distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#METERS}
*/
public DistanceUnit unit() {
return this.unit;
@@ -550,13 +552,23 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort");
}
final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType);
+ final Nested nested = resolveNested(context, nestedPath, nestedFilter);
+
+ if (geoIndexFieldData.getClass() == LatLonPointDVIndexFieldData.class // only works with 5.x geo_point
+ && nested == null
+ && finalSortMode == MultiValueMode.MIN // LatLonDocValuesField internally picks the closest point
+ && unit == DistanceUnit.METERS
+ && localPoints.size() == 1) {
+ return new SortFieldAndFormat(
+ LatLonDocValuesField.newDistanceSort(fieldName, localPoints.get(0).lat(), localPoints.get(0).lon()),
+ DocValueFormat.RAW);
+ }
+
final FixedSourceDistance[] distances = new FixedSourceDistance[localPoints.size()];
- for (int i = 0; i< localPoints.size(); i++) {
+ for (int i = 0; i < localPoints.size(); i++) {
distances[i] = geoDistance.fixedSourceDistance(localPoints.get(i).lat(), localPoints.get(i).lon(), unit);
}
- final Nested nested = resolveNested(context, nestedPath, nestedFilter);
-
IndexFieldData.XFieldComparatorSource geoDistanceComparatorSource = new IndexFieldData.XFieldComparatorSource() {
@Override
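
Note: the new fast path hands sorting to Lucene directly when the restrictive conditions hold (single point, meters, MIN mode, no nested context, 5.x geo_point doc values). A sketch of the Lucene call it delegates to, with an illustrative field name and coordinates:

    import org.apache.lucene.document.LatLonDocValuesField;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;

    // ascending by distance in meters from the given point;
    // multi-valued fields compare by their closest point, hence the MIN-mode requirement
    SortField byDistance = LatLonDocValuesField.newDistanceSort("location", 48.8566, 2.3522);
    Sort sort = new Sort(byDistance);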
diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
index 8c0436361c..0a7cb5e1b3 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
@@ -244,7 +244,7 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
currentName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseField.match(currentName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, parseField);
+ script = Script.parse(parser, parseField, context.getDefaultScriptLanguage());
} else if (parseField.match(currentName, NESTED_FILTER_FIELD)) {
nestedFilter = context.parseInnerQueryBuilder();
} else {
@@ -260,7 +260,7 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
} else if (parseField.match(currentName, NESTED_PATH_FIELD)) {
nestedPath = parser.text();
} else if (parseField.match(currentName, ScriptField.SCRIPT)) {
- script = Script.parse(parser, parseField);
+ script = Script.parse(parser, parseField, context.getDefaultScriptLanguage());
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
index c0567e59e8..874448b924 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
@@ -35,6 +35,7 @@ import java.util.List;
import java.util.Map;
/**
+ * Suggest phase of a search request, used to collect suggestions.
*/
public class SuggestPhase extends AbstractComponent implements SearchPhase {
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java
index 59555e049c..ca1d7a2306 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java
@@ -319,7 +319,7 @@ public abstract class SuggestionBuilder<T extends SuggestionBuilder<T>> extends
suggestionContext.setAnalyzer(fieldType.searchAnalyzer());
}
} else {
- Analyzer luceneAnalyzer = mapperService.analysisService().analyzer(analyzer);
+ Analyzer luceneAnalyzer = mapperService.getIndexAnalyzers().get(analyzer);
if (luceneAnalyzer == null) {
throw new IllegalArgumentException("analyzer [" + analyzer + "] doesn't exists");
}
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java
index 8453371078..c86c056522 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java
@@ -27,7 +27,6 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
-import org.elasticsearch.search.internal.InternalSearchHits.StreamContext.ShardTargetType;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
@@ -261,8 +260,7 @@ public final class CompletionSuggestion extends Suggest.Suggestion<CompletionSug
super.readFrom(in);
this.doc = Lucene.readScoreDoc(in);
if (in.readBoolean()) {
- this.hit = InternalSearchHit.readSearchHit(in,
- InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
+ this.hit = InternalSearchHit.readSearchHit(in);
}
int contextSize = in.readInt();
this.contexts = new LinkedHashMap<>(contextSize);
@@ -283,7 +281,7 @@ public final class CompletionSuggestion extends Suggest.Suggestion<CompletionSug
Lucene.writeScoreDoc(out, doc);
if (hit != null) {
out.writeBoolean(true);
- hit.writeTo(out, InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
+ hit.writeTo(out);
} else {
out.writeBoolean(false);
}
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java
index 854a5fb0e1..bd1449bbfe 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java
@@ -207,7 +207,8 @@ public class GeoContextMapping extends ContextMapping<GeoQueryContext> {
if (field instanceof StringField) {
spare.resetFromString(field.stringValue());
} else {
- spare.resetFromIndexHash(Long.parseLong(field.stringValue()));
+ // TODO: go back to using .stringValue() once LatLonPoint implements it
+ spare.resetFromIndexableField(field);
}
geohashes.add(spare.geohash());
}
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java
index 5fcbf9db57..245f2416b4 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/Completion090PostingsFormat.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.suggest.completion2x;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
@@ -41,7 +42,7 @@ import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.lucene.util.Version;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.mapper.CompletionFieldMapper2x;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -74,7 +75,7 @@ public class Completion090PostingsFormat extends PostingsFormat {
public static final int SUGGEST_VERSION_CURRENT = SUGGEST_CODEC_VERSION;
public static final String EXTENSION = "cmp";
- private static final ESLogger logger = Loggers.getLogger(Completion090PostingsFormat.class);
+ private static final Logger logger = Loggers.getLogger(Completion090PostingsFormat.class);
private PostingsFormat delegatePostingsFormat;
private static final Map<String, CompletionLookupProvider> providers;
private CompletionLookupProvider writeProvider;
@@ -127,7 +128,7 @@ public class Completion090PostingsFormat extends PostingsFormat {
boolean success = false;
try {
output = state.directory.createOutput(suggestFSTFile, state.context);
- CodecUtil.writeHeader(output, CODEC_NAME, SUGGEST_VERSION_CURRENT);
+ CodecUtil.writeIndexHeader(output, CODEC_NAME, SUGGEST_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
/*
* we write the delegate postings format name so we can load it
* without getting an instance in the ctor
@@ -165,7 +166,13 @@ public class Completion090PostingsFormat extends PostingsFormat {
public CompletionFieldsProducer(SegmentReadState state) throws IOException {
String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
IndexInput input = state.directory.openInput(suggestFSTFile, state.context);
- version = CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT);
+ if (state.segmentInfo.getVersion().onOrAfter(Version.LUCENE_6_2_0)) {
+ // Lucene 6.2.0+ requires all index files to use an index header; prior to that we used an ordinary codec header:
+ version = CodecUtil.checkIndexHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT,
+ state.segmentInfo.getId(), state.segmentSuffix);
+ } else {
+ version = CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT);
+ }
FieldsProducer delegateProducer = null;
boolean success = false;
try {
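
The version check above gates between the two header formats. As a rough illustration of the underlying Lucene 6 CodecUtil API (the file name, codec name, and suffix below are made-up values, not taken from this commit):

    import org.apache.lucene.codecs.CodecUtil;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.apache.lucene.store.IndexOutput;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.StringHelper;

    public class IndexHeaderRoundTrip {
        public static void main(String[] args) throws Exception {
            Directory dir = new RAMDirectory();
            byte[] segmentId = StringHelper.randomId(); // 16-byte id, like the one SegmentInfo supplies above
            try (IndexOutput out = dir.createOutput("example.cmp", IOContext.DEFAULT)) {
                // Writes the codec header plus segment id and suffix, as writeIndexHeader does above
                CodecUtil.writeIndexHeader(out, "example_codec", 1, segmentId, "suffix");
                CodecUtil.writeFooter(out);
            }
            try (IndexInput in = dir.openInput("example.cmp", IOContext.DEFAULT)) {
                // Throws CorruptIndexException if the codec name, version range, id, or suffix mismatch
                int version = CodecUtil.checkIndexHeader(in, "example_codec", 1, 1, segmentId, "suffix");
                System.out.println("read header version " + version);
            }
        }
    }
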
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/context/GeolocationContextMapping.java b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/context/GeolocationContextMapping.java
index 21031d36cc..5eed19ca00 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/context/GeolocationContextMapping.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/context/GeolocationContextMapping.java
@@ -208,7 +208,7 @@ public class GeolocationContextMapping extends ContextMapping {
@Override
protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException {
- builder.field(FIELD_PRECISION, precision);
+ builder.array(FIELD_PRECISION, precision);
builder.field(FIELD_NEIGHBORS, neighbors);
if (defaultLocations != null) {
builder.startArray(FIELD_MISSING);
@@ -741,7 +741,7 @@ public class GeolocationContextMapping extends ContextMapping {
} else {
builder.startObject(name);
builder.field(FIELD_VALUE, location);
- builder.field(FIELD_PRECISION, precisions);
+ builder.array(FIELD_PRECISION, precisions);
builder.endObject();
}
return builder;
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java
index 9e3beb2ccf..bf9158f9b8 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java
@@ -395,13 +395,13 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator
generator.setField(this.field);
transferIfNotNull(this.size, generator::size);
if (this.preFilter != null) {
- generator.preFilter(mapperService.analysisService().analyzer(this.preFilter));
+ generator.preFilter(mapperService.getIndexAnalyzers().get(this.preFilter));
if (generator.preFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.preFilter + "] doesn't exists");
}
}
if (this.postFilter != null) {
- generator.postFilter(mapperService.analysisService().analyzer(this.postFilter));
+ generator.postFilter(mapperService.getIndexAnalyzers().get(this.postFilter));
if (generator.postFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.postFilter + "] doesn't exists");
}
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
index 94ad7b8fad..b0cd6a2049 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
@@ -173,7 +173,7 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSuggestionB
} else {
out.writeBoolean(false);
}
- out.writeMap(collateParams);
+ out.writeMapWithConsistentOrder(collateParams);
out.writeOptionalBoolean(collatePrune);
out.writeVInt(this.generators.size());
for (Entry<String, List<CandidateGenerator>> entry : this.generators.entrySet()) {
diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index 7ab579aa45..5e30a3b52b 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -22,6 +22,8 @@ import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.IndicesOptions;
@@ -41,11 +43,10 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.UUIDs;
@@ -63,7 +64,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
@@ -109,13 +109,13 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
 * First, the {@link #restoreSnapshot(RestoreRequest, org.elasticsearch.action.ActionListener)}
 * method reads information about the snapshot and metadata from the repository. In the update cluster state task it checks restore
 * preconditions, restores global state if needed, creates a {@link RestoreInProgress} record with the list of shards that need
- * to be restored and adds this shard to the routing table using {@link org.elasticsearch.cluster.routing.RoutingTable.Builder#addAsRestore(IndexMetaData, RestoreSource)}
+ * to be restored and adds these shards to the routing table using {@link RoutingTable.Builder#addAsRestore(IndexMetaData, SnapshotRecoverySource)}
* method.
* <p>
 * Individual shards are restored as part of the normal recovery process in the
 * {@link IndexShard#restoreFromRepository(Repository)}
 * method, which detects that the shard should be restored from a snapshot rather than recovered from the gateway by looking
- * at the {@link org.elasticsearch.cluster.routing.ShardRouting#restoreSource()} property.
+ * at the {@link ShardRouting#recoverySource()} property.
* <p>
* At the end of the successful restore process {@code IndexShardSnapshotAndRestoreService} calls {@link #indexShardRestoreCompleted(Snapshot, ShardId)},
* which updates {@link RestoreInProgress} in cluster state or removes it when all shards are completed. In case of
@@ -241,7 +241,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
String index = indexEntry.getValue();
boolean partial = checkPartial(index);
- RestoreSource restoreSource = new RestoreSource(snapshot, snapshotInfo.version(), index);
+ SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(snapshot, snapshotInfo.version(), index);
String renamedIndexName = indexEntry.getKey();
IndexMetaData snapshotIndexMetaData = metaData.index(index);
snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
@@ -257,7 +257,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
if (currentIndexMetaData == null) {
// Index doesn't exist - create it and start recovery
// Make sure that the index we are about to create has a valid name
- createIndexService.validateIndexName(renamedIndexName, currentState);
+ MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState);
createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings());
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName);
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
@@ -273,7 +273,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
if (partial) {
populateIgnoredShards(index, ignoreShards);
}
- rtBuilder.addAsNewRestore(updatedIndexMetaData, restoreSource, ignoreShards);
+ rtBuilder.addAsNewRestore(updatedIndexMetaData, recoverySource, ignoreShards);
blocks.addBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
@@ -298,7 +298,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
}
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build();
- rtBuilder.addAsRestore(updatedIndexMetaData, restoreSource);
+ rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource);
blocks.updateBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
@@ -335,10 +335,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
RoutingTable rt = rtBuilder.build();
ClusterState updatedState = builder.metaData(mdBuilder).blocks(blocks).routingTable(rt).build();
- RoutingAllocation.Result routingResult = allocationService.reroute(
- ClusterState.builder(updatedState).routingTable(rt).build(),
- "restored snapshot [" + snapshot + "]");
- return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ return allocationService.reroute(updatedState, "restored snapshot [" + snapshot + "]");
}
private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
@@ -461,7 +458,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Override
public void onFailure(String source, Exception e) {
- logger.warn("[{}] failed to restore snapshot", e, snapshotId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
listener.onFailure(e);
}
@@ -478,7 +475,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
} catch (Exception e) {
- logger.warn("[{}] failed to restore snapshot", e, request.repositoryName + ":" + request.snapshotName);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
listener.onFailure(e);
}
}
@@ -602,7 +599,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Override
public void onFailure(String source, @Nullable Exception e) {
for (UpdateIndexShardRestoreStatusRequest request : drainedRequests) {
- logger.warn("[{}][{}] failed to update snapshot status to [{}]", e, request.snapshot(), request.shardId(), request.status());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
}
}
@@ -670,7 +667,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
try {
listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
} catch (Exception e) {
- logger.warn("failed to update snapshot status for [{}]", e, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to update snapshot status for [{}]", listener), e);
}
}
}
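
The logging changes in this file (and in the files below) all follow the same log4j 2 migration idiom: the old ESLogger varargs form logger.warn(message, exception, args...) becomes a lazily evaluated ParameterizedMessage supplier, with the exception passed as a separate trailing argument. The message is then only formatted when the level is enabled, and the throwable keeps its dedicated parameter. A minimal, self-contained sketch of the idiom (the class and values are hypothetical):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    public class LazyLoggingSketch {
        private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

        public static void main(String[] args) {
            String snapshotId = "snap-1";
            Exception cause = new IllegalStateException("shard failed");
            // The cast selects the warn(Supplier<?>, Throwable) overload; the message
            // is only built if WARN is enabled, and the stack trace is still logged.
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), cause);
        }
    }
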
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
index 5f0979e38d..b7f2c6af4a 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
@@ -30,15 +30,15 @@ import java.io.IOException;
public class SnapshotMissingException extends SnapshotException {
public SnapshotMissingException(final String repositoryName, final SnapshotId snapshotId, final Throwable cause) {
- super(repositoryName, snapshotId, "is missing", cause);
+ super(repositoryName, snapshotId, " is missing", cause);
}
public SnapshotMissingException(final String repositoryName, final SnapshotId snapshotId) {
- super(repositoryName, snapshotId, "is missing");
+ super(repositoryName, snapshotId, " is missing");
}
public SnapshotMissingException(final String repositoryName, final String snapshotName) {
- super(repositoryName, snapshotName, "is missing");
+ super(repositoryName, snapshotName, " is missing");
}
public SnapshotMissingException(StreamInput in) throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
index e957d2deb6..1f7a4ee4fd 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
@@ -20,6 +20,8 @@
package org.elasticsearch.snapshots;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexCommit;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -312,7 +314,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
@Override
public void onFailure(Exception e) {
- logger.warn("[{}] [{}] failed to create snapshot", e, shardId, entry.getKey());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to create snapshot", shardId, entry.getKey()), e);
updateIndexShardSnapshotStatus(entry.getKey(), shardId, new SnapshotsInProgress.ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e)));
}
@@ -494,7 +496,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
}
} catch (Exception e) {
- logger.warn("[{}] [{}] failed to update snapshot state", e, request.snapshot(), request.status());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", request.snapshot(), request.status()), e);
}
}
@@ -578,7 +580,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
@Override
public void onFailure(String source, Exception e) {
for (UpdateIndexShardSnapshotStatusRequest request : drainedRequests) {
- logger.warn("[{}][{}] failed to update snapshot status to [{}]", e, request.snapshot(), request.shardId(), request.status());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
}
}
});
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
index a0c1ddf1ea..ea8deea566 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -21,6 +21,8 @@ package org.elasticsearch.snapshots;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
@@ -180,7 +182,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
snapshotSet.add(repository.getSnapshotInfo(snapshotId));
} catch (Exception ex) {
if (ignoreUnavailable) {
- logger.warn("failed to get snapshot [{}]", ex, snapshotId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex);
} else {
throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex);
}
@@ -254,7 +256,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
@Override
public void onFailure(String source, Exception e) {
- logger.warn("[{}][{}] failed to create snapshot", e, repositoryName, snapshotName);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
newSnapshot = null;
listener.onFailure(e);
}
@@ -405,7 +407,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
@Override
public void onFailure(String source, Exception e) {
- logger.warn("[{}] failed to create snapshot", e, snapshot.snapshot().getSnapshotId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
}
@@ -427,7 +429,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
}
});
} catch (Exception e) {
- logger.warn("failed to create snapshot [{}]", e, snapshot.snapshot().getSnapshotId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
}
}
@@ -469,7 +471,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
Collections.emptyList());
} catch (Exception inner) {
inner.addSuppressed(exception);
- logger.warn("[{}] failed to close snapshot in repository", inner, snapshot.snapshot());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
}
}
userCreateSnapshotListener.onFailure(e);
@@ -722,7 +724,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
@Override
public void onFailure(String source, Exception e) {
- logger.warn("failed to update snapshot state after shards started from [{}] ", e, source);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
}
});
}
@@ -876,7 +878,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
SnapshotInfo snapshotInfo = repository.finalizeSnapshot(snapshot.getSnapshotId(), entry.indices(), entry.startTime(), failure, entry.shards().size(), Collections.unmodifiableList(shardFailures));
removeSnapshotFromClusterState(snapshot, snapshotInfo, null);
} catch (Exception e) {
- logger.warn("[{}] failed to finalize snapshot", e, snapshot);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e);
removeSnapshotFromClusterState(snapshot, null, e);
}
}
@@ -925,7 +927,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
@Override
public void onFailure(String source, Exception e) {
- logger.warn("[{}] failed to remove snapshot metadata", e, snapshot);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e);
if (listener != null) {
listener.onFailure(e);
}
@@ -941,7 +943,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
listener.onSnapshotFailure(snapshot, failure);
}
} catch (Exception t) {
- logger.warn("failed to notify listener [{}]", t, listener);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to notify listener [{}]", listener), t);
}
}
if (listener != null) {
diff --git a/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
index 002d0b6a46..c3eeaa6ee8 100644
--- a/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
+++ b/core/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java
@@ -19,7 +19,9 @@
package org.elasticsearch.tasks;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.logging.Loggers;
/**
@@ -27,7 +29,7 @@ import org.elasticsearch.common.logging.Loggers;
* need a listener but aren't returning the result to the user.
*/
public final class LoggingTaskListener<Response> implements TaskListener<Response> {
- private static final ESLogger logger = Loggers.getLogger(LoggingTaskListener.class);
+ private static final Logger logger = Loggers.getLogger(LoggingTaskListener.class);
/**
 * Get the instance of LoggingTaskListener cast appropriately.
@@ -49,6 +51,6 @@ public final class LoggingTaskListener<Response> implements TaskListener<Respons
@Override
public void onFailure(Task task, Throwable e) {
- logger.warn("{} failed with exception", e, task.getId());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed with exception", task.getId()), e);
}
}
diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java
index 271586779e..9792b894b5 100644
--- a/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java
+++ b/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java
@@ -180,7 +180,7 @@ public final class TaskInfo implements Writeable, ToXContent {
if (description != null) {
builder.field("description", description);
}
- builder.dateValueField("start_time_in_millis", "start_time", startTime);
+ builder.dateField("start_time_in_millis", "start_time", startTime);
builder.timeValueField("running_time_in_nanos", "running_time", runningTimeNanos, TimeUnit.NANOSECONDS);
builder.field("cancellable", cancellable);
if (parentTaskId.isSet()) {
diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java
index f0fea6aa2a..003a51c317 100644
--- a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java
+++ b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java
@@ -19,6 +19,8 @@
package org.elasticsearch.tasks;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
@@ -166,7 +168,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
try {
taskResult = task.result(localNode, error);
} catch (IOException ex) {
- logger.warn("couldn't store error {}", ex, ExceptionsHelper.detailedMessage(error));
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex);
listener.onFailure(ex);
return;
}
@@ -178,7 +181,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
@Override
public void onFailure(Exception e) {
- logger.warn("couldn't store error {}", e, ExceptionsHelper.detailedMessage(error));
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e);
listener.onFailure(e);
}
});
@@ -199,7 +203,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
try {
taskResult = task.result(localNode, response);
} catch (IOException ex) {
- logger.warn("couldn't store response {}", ex, response);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), ex);
listener.onFailure(ex);
return;
}
@@ -212,7 +216,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
@Override
public void onFailure(Exception e) {
- logger.warn("couldn't store response {}", e, response);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), e);
listener.onFailure(e);
}
});
diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
index 4b68e8af97..fd515c5733 100644
--- a/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
+++ b/core/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.tasks;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@@ -163,7 +165,9 @@ public class TaskResultsService extends AbstractComponent {
Streams.copy(is, out);
return out.toString(IOUtils.UTF_8);
} catch (Exception e) {
- logger.error("failed to create tasks results index template [{}]", e, TASK_RESULT_INDEX_MAPPING_FILE);
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e);
throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e);
}
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index 044f19eaee..d2ff4defc9 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -19,13 +19,15 @@
package org.elasticsearch.threadpool;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
@@ -413,7 +415,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
try {
runnable.run();
} catch (Exception e) {
- logger.warn("failed to run {}", e, runnable.toString());
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", runnable.toString()), e);
throw e;
}
}
@@ -527,18 +529,14 @@ public class ThreadPool extends AbstractComponent implements Closeable {
}
}
- public static class Info implements Streamable, ToXContent {
+ public static class Info implements Writeable, ToXContent {
- private String name;
- private ThreadPoolType type;
- private int min;
- private int max;
- private TimeValue keepAlive;
- private SizeValue queueSize;
-
- Info() {
-
- }
+ private final String name;
+ private final ThreadPoolType type;
+ private final int min;
+ private final int max;
+ private final TimeValue keepAlive;
+ private final SizeValue queueSize;
public Info(String name, ThreadPoolType type) {
this(name, type, -1);
@@ -557,6 +555,25 @@ public class ThreadPool extends AbstractComponent implements Closeable {
this.queueSize = queueSize;
}
+ public Info(StreamInput in) throws IOException {
+ name = in.readString();
+ type = ThreadPoolType.fromType(in.readString());
+ min = in.readInt();
+ max = in.readInt();
+ keepAlive = in.readOptionalWriteable(TimeValue::new);
+ queueSize = in.readOptionalWriteable(SizeValue::new);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(type.getType());
+ out.writeInt(min);
+ out.writeInt(max);
+ out.writeOptionalWriteable(keepAlive);
+ out.writeOptionalWriteable(queueSize);
+ }
+
public String getName() {
return this.name;
}
@@ -584,46 +601,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- name = in.readString();
- type = ThreadPoolType.fromType(in.readString());
- min = in.readInt();
- max = in.readInt();
- if (in.readBoolean()) {
- keepAlive = new TimeValue(in);
- }
- if (in.readBoolean()) {
- queueSize = SizeValue.readSizeValue(in);
- }
- in.readBoolean(); // here to conform with removed waitTime
- in.readBoolean(); // here to conform with removed rejected setting
- in.readBoolean(); // here to conform with queue type
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(name);
- out.writeString(type.getType());
- out.writeInt(min);
- out.writeInt(max);
- if (keepAlive == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- keepAlive.writeTo(out);
- }
- if (queueSize == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- queueSize.writeTo(out);
- }
- out.writeBoolean(false); // here to conform with removed waitTime
- out.writeBoolean(false); // here to conform with removed rejected setting
- out.writeBoolean(false); // here to conform with queue type
- }
-
- @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.field(Fields.TYPE, type.getType());
@@ -652,7 +629,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
static final String KEEP_ALIVE = "keep_alive";
static final String QUEUE_SIZE = "queue_size";
}
-
}
/**
@@ -779,14 +755,14 @@ public class ThreadPool extends AbstractComponent implements Closeable {
@Override
public void onFailure(Exception e) {
- threadPool.logger.warn("failed to run scheduled task [{}] on thread pool [{}]", e, runnable.toString(), executor);
+ threadPool.logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", runnable.toString(), executor), e);
}
@Override
public void onRejection(Exception e) {
run = false;
if (threadPool.logger.isDebugEnabled()) {
- threadPool.logger.debug("scheduled task [{}] was rejected on thread pool [{}]", e, runnable, executor);
+ threadPool.logger.debug((Supplier<?>) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", runnable, executor), e);
}
}
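
The Info change above is one instance of the broader Streamable-to-Writeable migration running through this commit: mutable fields plus readFrom() are replaced by final fields, a StreamInput constructor, and writeTo(). A stripped-down sketch of the pattern (ExampleInfo is hypothetical, not a class in this commit):

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    public class ExampleInfo implements Writeable {
        private final String name;
        private final int size;

        public ExampleInfo(String name, int size) {
            this.name = name;
            this.size = size;
        }

        // Deserialization happens in a constructor, so fields can stay final.
        public ExampleInfo(StreamInput in) throws IOException {
            name = in.readString();
            size = in.readInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
            out.writeInt(size);
        }
    }
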
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
index 729c6cb736..70c0f2c959 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
@@ -21,57 +21,35 @@ package org.elasticsearch.threadpool;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-/**
- */
-public class ThreadPoolInfo implements Streamable, Iterable<ThreadPool.Info>, ToXContent {
-
- private List<ThreadPool.Info> infos;
-
- ThreadPoolInfo() {
- }
+public class ThreadPoolInfo implements Writeable, Iterable<ThreadPool.Info>, ToXContent {
+ private final List<ThreadPool.Info> infos;
public ThreadPoolInfo(List<ThreadPool.Info> infos) {
- this.infos = infos;
- }
-
- @Override
- public Iterator<ThreadPool.Info> iterator() {
- return infos.iterator();
+ this.infos = Collections.unmodifiableList(infos);
}
- public static ThreadPoolInfo readThreadPoolInfo(StreamInput in) throws IOException {
- ThreadPoolInfo info = new ThreadPoolInfo();
- info.readFrom(in);
- return info;
+ public ThreadPoolInfo(StreamInput in) throws IOException {
+ this.infos = Collections.unmodifiableList(in.readList(ThreadPool.Info::new));
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- int size = in.readVInt();
- infos = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- ThreadPool.Info info = new ThreadPool.Info();
- info.readFrom(in);
- infos.add(info);
- }
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeList(infos);
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(infos.size());
- for (ThreadPool.Info info : infos) {
- info.writeTo(out);
- }
+ public Iterator<ThreadPool.Info> iterator() {
+ return infos.iterator();
}
static final class Fields {
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
index fdbbaef19d..ead076fc83 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
@@ -21,33 +21,26 @@ package org.elasticsearch.threadpool;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-/**
- */
-public class ThreadPoolStats implements Streamable, ToXContent, Iterable<ThreadPoolStats.Stats> {
-
- public static class Stats implements Streamable, ToXContent, Comparable<Stats> {
+public class ThreadPoolStats implements Writeable, ToXContent, Iterable<ThreadPoolStats.Stats> {
- private String name;
- private int threads;
- private int queue;
- private int active;
- private long rejected;
- private int largest;
- private long completed;
+ public static class Stats implements Writeable, ToXContent, Comparable<Stats> {
- Stats() {
-
- }
+ private final String name;
+ private final int threads;
+ private final int queue;
+ private final int active;
+ private final long rejected;
+ private final int largest;
+ private final long completed;
public Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed) {
this.name = name;
@@ -59,6 +52,27 @@ public class ThreadPoolStats implements Streamable, ToXContent, Iterable<ThreadP
this.completed = completed;
}
+ public Stats(StreamInput in) throws IOException {
+ name = in.readString();
+ threads = in.readInt();
+ queue = in.readInt();
+ active = in.readInt();
+ rejected = in.readLong();
+ largest = in.readInt();
+ completed = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeInt(threads);
+ out.writeInt(queue);
+ out.writeInt(active);
+ out.writeLong(rejected);
+ out.writeInt(largest);
+ out.writeLong(completed);
+ }
+
public String getName() {
return this.name;
}
@@ -88,28 +102,6 @@ public class ThreadPoolStats implements Streamable, ToXContent, Iterable<ThreadP
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- name = in.readString();
- threads = in.readInt();
- queue = in.readInt();
- active = in.readInt();
- rejected = in.readLong();
- largest = in.readInt();
- completed = in.readLong();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(name);
- out.writeInt(threads);
- out.writeInt(queue);
- out.writeInt(active);
- out.writeLong(rejected);
- out.writeInt(largest);
- out.writeLong(completed);
- }
-
- @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
if (threads != -1) {
@@ -154,43 +146,23 @@ public class ThreadPoolStats implements Streamable, ToXContent, Iterable<ThreadP
private List<Stats> stats;
- ThreadPoolStats() {
-
- }
-
public ThreadPoolStats(List<Stats> stats) {
Collections.sort(stats);
this.stats = stats;
}
- @Override
- public Iterator<Stats> iterator() {
- return stats.iterator();
- }
-
- public static ThreadPoolStats readThreadPoolStats(StreamInput in) throws IOException {
- ThreadPoolStats stats = new ThreadPoolStats();
- stats.readFrom(in);
- return stats;
+ public ThreadPoolStats(StreamInput in) throws IOException {
+ stats = in.readList(Stats::new);
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- int size = in.readVInt();
- stats = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- Stats stats1 = new Stats();
- stats1.readFrom(in);
- stats.add(stats1);
- }
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeList(stats);
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(stats.size());
- for (Stats stat : stats) {
- stat.writeTo(out);
- }
+ public Iterator<Stats> iterator() {
+ return stats.iterator();
}
static final class Fields {
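
With both classes on Writeable, whole lists round-trip through the readList/writeList helpers used above. A short sketch under the same assumptions, reusing the hypothetical ExampleInfo from the earlier sketch:

    import java.util.Arrays;
    import java.util.List;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;

    public class ListRoundTrip {
        public static void main(String[] args) throws Exception {
            BytesStreamOutput out = new BytesStreamOutput();
            out.writeList(Arrays.asList(new ExampleInfo("search", 5), new ExampleInfo("bulk", 3)));
            StreamInput in = out.bytes().streamInput();
            // The reader reference matches the StreamInput constructor added above.
            List<ExampleInfo> infos = in.readList(ExampleInfo::new);
            System.out.println(infos.size()); // 2
        }
    }
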
diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
index d567e83813..0dd0c05dca 100644
--- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -20,6 +20,8 @@ package org.elasticsearch.transport;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@@ -106,8 +108,8 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new
public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent implements Transport {
- public static final String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker";
- public static final String HTTP_SERVER_BOSS_THREAD_NAME_PREFIX = "http_server_boss";
+ public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker";
+ public static final String TRANSPORT_SERVER_BOSS_THREAD_NAME_PREFIX = "transport_server_boss";
public static final String TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX = "transport_client_worker";
public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss";
@@ -143,7 +145,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
Setting.Property.NodeScope);
- private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().bytes() * 0.9);
+ private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9);
private static final int PING_DATA_SIZE = -1;
protected final int connectionsPerNodeRecovery;
@@ -258,10 +260,13 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
sendMessage(channel, pingHeader, successfulPings::inc, false);
} catch (Exception e) {
if (isOpen(channel)) {
- logger.debug("[{}] failed to send ping transport message", e, node);
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
failedPings.inc();
} else {
- logger.trace("[{}] failed to send ping transport message (channel closed)", e, node);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "[{}] failed to send ping transport message (channel closed)", node), e);
}
}
}
@@ -397,7 +402,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
nodeChannels = connectToChannels(node);
} catch (Exception e) {
- logger.trace("failed to connect to [{}], cleaning dangling connections", e, node);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to connect to [{}], cleaning dangling connections", node), e);
throw e;
}
}
@@ -772,7 +779,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
closeChannels(entry.getValue());
} catch (Exception e) {
- logger.debug("Error closing serverChannel for profile [{}]", e, entry.getKey());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Error closing serverChannel for profile [{}]", entry.getKey()), e);
}
}
@@ -802,21 +811,27 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
return;
}
if (isCloseConnectionException(e)) {
- logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e,
- channel);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "close connection exception caught on transport layer [{}], disconnecting from relevant node",
+ channel),
+ e);
// close the channel, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
} else if (isConnectException(e)) {
- logger.trace("connect exception caught on transport layer [{}]", e, channel);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
} else if (e instanceof BindException) {
- logger.trace("bind exception caught on transport layer [{}]", e, channel);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
} else if (e instanceof CancelledKeyException) {
- logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e,
- channel);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "cancelled key exception caught on transport layer [{}], disconnecting from relevant node",
+ channel),
+ e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
} else if (e instanceof TcpTransport.HttpOnTransportException) {
@@ -825,7 +840,8 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
sendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), () -> {}, true);
}
} else {
- logger.warn("exception caught on transport layer [{}], closing connection", e, channel);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
// close the channel, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
}
@@ -1260,7 +1276,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
handler.handleException(rtx);
} catch (Exception e) {
- logger.error("failed to handle exception response [{}]", e, handler);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
}
});
}
@@ -1297,7 +1313,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
transportChannel.sendResponse(e);
} catch (IOException inner) {
inner.addSuppressed(e);
- logger.warn("Failed to send error message back to client for action [{}]", inner, action);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Failed to send error message back to client for action [{}]", action), inner);
}
}
return action;
@@ -1343,7 +1361,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
transportChannel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("Failed to send error message back to client for action [{}]", inner, reg.getAction());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Failed to send error message back to client for action [{}]", reg.getAction()), inner);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
index 5f30296409..3d46c0853e 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
@@ -19,7 +19,8 @@
package org.elasticsearch.transport;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
@@ -30,12 +31,12 @@ import java.util.function.Supplier;
*/
public class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
- private final ESLogger logger;
+ private final Logger logger;
private final TransportChannel channel;
private final String extraInfoOnError;
private final Supplier<T> responseSupplier;
- public TransportChannelResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError,
+ public TransportChannelResponseHandler(Logger logger, TransportChannel channel, String extraInfoOnError,
Supplier<T> responseSupplier) {
this.logger = logger;
this.channel = channel;
@@ -62,7 +63,12 @@ public class TransportChannelResponseHandler<T extends TransportResponse> implem
try {
channel.sendResponse(exp);
} catch (IOException e) {
- logger.debug("failed to send failure {}", e, extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")");
+ logger.debug(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage(
+ "failed to send failure {}",
+ extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"),
+ e);
}
}
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportInfo.java b/core/src/main/java/org/elasticsearch/transport/TransportInfo.java
index 236c0d50a9..fbabf49b65 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportInfo.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportInfo.java
@@ -22,7 +22,7 @@ package org.elasticsearch.transport;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -31,56 +31,17 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
-/**
- *
- */
-public class TransportInfo implements Streamable, ToXContent {
+public class TransportInfo implements Writeable, ToXContent {
private BoundTransportAddress address;
private Map<String, BoundTransportAddress> profileAddresses;
- TransportInfo() {
- }
-
public TransportInfo(BoundTransportAddress address, @Nullable Map<String, BoundTransportAddress> profileAddresses) {
this.address = address;
this.profileAddresses = profileAddresses;
}
- static final class Fields {
- static final String TRANSPORT = "transport";
- static final String BOUND_ADDRESS = "bound_address";
- static final String PUBLISH_ADDRESS = "publish_address";
- static final String PROFILES = "profiles";
- }
-
- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject(Fields.TRANSPORT);
- builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
- builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
- builder.startObject(Fields.PROFILES);
- if (profileAddresses != null && profileAddresses.size() > 0) {
- for (Map.Entry<String, BoundTransportAddress> entry : profileAddresses.entrySet()) {
- builder.startObject(entry.getKey());
- builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses());
- builder.field(Fields.PUBLISH_ADDRESS, entry.getValue().publishAddress().toString());
- builder.endObject();
- }
- }
- builder.endObject();
- builder.endObject();
- return builder;
- }
-
- public static TransportInfo readTransportInfo(StreamInput in) throws IOException {
- TransportInfo info = new TransportInfo();
- info.readFrom(in);
- return info;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
+ public TransportInfo(StreamInput in) throws IOException {
address = BoundTransportAddress.readBoundTransportAddress(in);
int size = in.readVInt();
if (size > 0) {
@@ -109,6 +70,32 @@ public class TransportInfo implements Streamable, ToXContent {
}
}
+ static final class Fields {
+ static final String TRANSPORT = "transport";
+ static final String BOUND_ADDRESS = "bound_address";
+ static final String PUBLISH_ADDRESS = "publish_address";
+ static final String PROFILES = "profiles";
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.TRANSPORT);
+ builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
+ builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
+ builder.startObject(Fields.PROFILES);
+ if (profileAddresses != null && profileAddresses.size() > 0) {
+ for (Map.Entry<String, BoundTransportAddress> entry : profileAddresses.entrySet()) {
+ builder.startObject(entry.getKey());
+ builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses());
+ builder.field(Fields.PUBLISH_ADDRESS, entry.getValue().publishAddress().toString());
+ builder.endObject();
+ }
+ }
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
+
public BoundTransportAddress address() {
return address;
}
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java b/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java
new file mode 100644
index 0000000000..d8072a81ba
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+import java.util.function.Supplier;
+
+/**
+ * This interface allows plugins to intercept requests on both the sender and the receiver side.
+ */
+public interface TransportInterceptor {
+ /**
+ * This is called for each handler that is registered via
+ * {@link TransportService#registerRequestHandler(String, Supplier, String, boolean, boolean, TransportRequestHandler)} or
+ * {@link TransportService#registerRequestHandler(String, Supplier, String, TransportRequestHandler)}. The returned handler is
+ * used instead of the passed in handler. By default the provided handler is returned.
+ */
+ default <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action,
+ TransportRequestHandler<T> actualHandler) {
+ return actualHandler;
+ }
+
+ /**
+ * This is called up-front, providing the actual low-level {@link AsyncSender} that performs the send request.
+ * The returned sender is used to send all requests that come in via
+ * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportResponseHandler)} or
+ * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)}.
+ * This allows plugins to perform actions on each send request, such as modifying the request context.
+ */
+ default AsyncSender interceptSender(AsyncSender sender) {
+ return sender;
+ }
+
+ /**
+ * A simple interface to decorate
+ * {@link #sendRequest(DiscoveryNode, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)}
+ */
+ interface AsyncSender {
+ <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
+ final TransportRequestOptions options, TransportResponseHandler<T> handler);
+ }
+}
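
As a rough usage sketch of this new extension point (the counting interceptor below is hypothetical, not part of this commit), a plugin could wrap the sender to observe every outbound request:

    package org.elasticsearch.transport; // assumed, so the transport types resolve without imports

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;
    import org.elasticsearch.cluster.node.DiscoveryNode;

    public class CountingInterceptor implements TransportInterceptor {
        private final ConcurrentHashMap<String, LongAdder> counts = new ConcurrentHashMap<>();

        @Override
        public AsyncSender interceptSender(AsyncSender sender) {
            return new AsyncSender() {
                @Override
                public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action,
                        TransportRequest request, TransportRequestOptions options,
                        TransportResponseHandler<T> handler) {
                    // Count the outbound request, then delegate to the real sender.
                    counts.computeIfAbsent(action, a -> new LongAdder()).increment();
                    sender.sendRequest(node, action, request, options, handler);
                }
            };
        }
    }
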
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java
index 15164a5d20..8c5886f731 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportService.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java
@@ -19,6 +19,8 @@
package org.elasticsearch.transport;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.cluster.ClusterName;
@@ -29,7 +31,6 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex;
@@ -65,9 +66,6 @@ import java.util.function.Supplier;
import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.listSetting;
-/**
- *
- */
public class TransportService extends AbstractLifecycleComponent {
public static final String DIRECT_RESPONSE_PROFILE = ".direct";
@@ -78,16 +76,19 @@ public class TransportService extends AbstractLifecycleComponent {
protected final ThreadPool threadPool;
protected final ClusterName clusterName;
protected final TaskManager taskManager;
+ private final TransportInterceptor.AsyncSender asyncSender;
volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
final Object requestHandlerMutex = new Object();
final ConcurrentMapLong<RequestHolder> clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
- final AtomicLong requestIds = new AtomicLong();
+ private final AtomicLong requestIds = new AtomicLong();
final CopyOnWriteArrayList<TransportConnectionListener> connectionListeners = new CopyOnWriteArrayList<>();
+ private final TransportInterceptor interceptor;
+
// An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they
// do show up, we can print more descriptive information about them
final Map<Long, TimeoutInfoHolder> timeoutInfoHandlers =
@@ -100,6 +101,8 @@ public class TransportService extends AbstractLifecycleComponent {
private final TransportService.Adapter adapter;
+ public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() {};
+
// tracer log
public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING =
@@ -108,7 +111,7 @@ public class TransportService extends AbstractLifecycleComponent {
listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME),
Function.identity(), Property.Dynamic, Property.NodeScope);
- private final ESLogger tracerLog;
+ private final Logger tracerLog;
volatile String[] tracerLogInclude;
volatile String[] tracelLogExclude;
@@ -117,7 +120,7 @@ public class TransportService extends AbstractLifecycleComponent {
volatile DiscoveryNode localNode = null;
@Inject
- public TransportService(Settings settings, Transport transport, ThreadPool threadPool) {
+ public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor) {
super(settings);
this.transport = transport;
this.threadPool = threadPool;
@@ -127,6 +130,8 @@ public class TransportService extends AbstractLifecycleComponent {
tracerLog = Loggers.getLogger(logger, ".tracer");
adapter = createAdapter();
taskManager = createTaskManager();
+ this.interceptor = transportInterceptor;
+ this.asyncSender = interceptor.interceptSender(this::sendRequestInternal);
}
/**
@@ -205,11 +210,19 @@ public class TransportService extends AbstractLifecycleComponent {
@Override
public void onRejection(Exception e) {
// if we get rejected during node shutdown we don't want to bubble it up
- logger.debug("failed to notify response handler on rejection, action: {}", e, holderToNotify.action());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify response handler on rejection, action: {}",
+ holderToNotify.action()),
+ e);
}
@Override
public void onFailure(Exception e) {
- logger.warn("failed to notify response handler on exception, action: {}", e, holderToNotify.action());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify response handler on exception, action: {}",
+ holderToNotify.action()),
+ e);
}
@Override
public void doRun() {
@@ -232,11 +245,11 @@ public class TransportService extends AbstractLifecycleComponent {
* when the transport layer starts up it will block any incoming requests until
* this method is called
*/
- public void acceptIncomingRequests() {
+ public final void acceptIncomingRequests() {
blockIncomingRequestsLatch.countDown();
}
- public boolean addressSupported(Class<? extends TransportAddress> address) {
+ public final boolean addressSupported(Class<? extends TransportAddress> address) {
return transport.addressSupported(address);
}
@@ -433,13 +446,23 @@ public class TransportService extends AbstractLifecycleComponent {
return futureHandler;
}
- public <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
- final TransportResponseHandler<T> handler) {
+ public final <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action,
+ final TransportRequest request,
+ final TransportResponseHandler<T> handler) {
sendRequest(node, action, request, TransportRequestOptions.EMPTY, handler);
}
- public <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
- final TransportRequestOptions options, TransportResponseHandler<T> handler) {
+ public final <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action,
+ final TransportRequest request,
+ final TransportRequestOptions options,
+ TransportResponseHandler<T> handler) {
+ asyncSender.sendRequest(node, action, request, options, handler);
+ }
+
+ private <T extends TransportResponse> void sendRequestInternal(final DiscoveryNode node, final String action,
+ final TransportRequest request,
+ final TransportRequestOptions options,
+ TransportResponseHandler<T> handler) {
if (node == null) {
throw new IllegalStateException("can't send request to a null node");
}
@@ -483,11 +506,19 @@ public class TransportService extends AbstractLifecycleComponent {
@Override
public void onRejection(Exception e) {
// if we get rejected during node shutdown we don't want to bubble it up
- logger.debug("failed to notify response handler on rejection, action: {}", e, holderToNotify.action());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify response handler on rejection, action: {}",
+ holderToNotify.action()),
+ e);
}
@Override
public void onFailure(Exception e) {
- logger.warn("failed to notify response handler on exception, action: {}", e, holderToNotify.action());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify response handler on exception, action: {}",
+ holderToNotify.action()),
+ e);
}
@Override
protected void doRun() throws Exception {
@@ -528,7 +559,9 @@ public class TransportService extends AbstractLifecycleComponent {
channel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("failed to notify channel of error message for action [{}]", inner, action);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify channel of error message for action [{}]", action), inner);
}
}
});
@@ -539,7 +572,9 @@ public class TransportService extends AbstractLifecycleComponent {
channel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("failed to notify channel of error message for action [{}]", inner, action);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify channel of error message for action [{}]", action), inner);
}
}
@@ -573,8 +608,9 @@ public class TransportService extends AbstractLifecycleComponent {
* @param executor The executor the request handling will be executed on
* @param handler The handler itself that implements the request handling
*/
- public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> requestFactory, String executor,
- TransportRequestHandler<Request> handler) {
+ public final <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> requestFactory,
+ String executor, TransportRequestHandler<Request> handler) {
+ handler = interceptor.interceptHandler(action, handler);
RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
action, requestFactory, taskManager, handler, executor, false, true);
registerRequestHandler(reg);
@@ -590,28 +626,22 @@ public class TransportService extends AbstractLifecycleComponent {
* @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached.
* @param handler The handler itself that implements the request handling
*/
- public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request,
+ public final <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request,
String executor, boolean forceExecution,
boolean canTripCircuitBreaker,
TransportRequestHandler<Request> handler) {
+ handler = interceptor.interceptHandler(action, handler);
RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
action, request, taskManager, handler, executor, forceExecution, canTripCircuitBreaker);
registerRequestHandler(reg);
}
- protected <Request extends TransportRequest> void registerRequestHandler(RequestHandlerRegistry<Request> reg) {
+ private <Request extends TransportRequest> void registerRequestHandler(RequestHandlerRegistry<Request> reg) {
synchronized (requestHandlerMutex) {
- RequestHandlerRegistry replaced = requestHandlers.get(reg.getAction());
- requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap();
- if (replaced != null) {
- logger.warn("registered two transport handlers for action {}, handlers: {}, {}", reg.getAction(), reg, replaced);
+ if (requestHandlers.containsKey(reg.getAction())) {
+ throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered");
}
- }
- }
-
- public void removeHandler(String action) {
- synchronized (requestHandlerMutex) {
- requestHandlers = MapBuilder.newMapBuilder(requestHandlers).remove(action).immutableMap();
+ requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap();
}
}
@@ -661,7 +691,9 @@ public class TransportService extends AbstractLifecycleComponent {
}
protected void traceResponseSent(long requestId, String action, Exception e) {
- tracerLog.trace("[{}][{}] sent error response", e, requestId, action);
+ tracerLog.trace(
+ (org.apache.logging.log4j.util.Supplier<?>)
+ () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e);
}
@Override
@@ -728,12 +760,9 @@ public class TransportService extends AbstractLifecycleComponent {
@Override
public void raiseNodeConnected(final DiscoveryNode node) {
- threadPool.generic().execute(new Runnable() {
- @Override
- public void run() {
- for (TransportConnectionListener connectionListener : connectionListeners) {
- connectionListener.onNodeConnected(node);
- }
+ threadPool.generic().execute(() -> {
+ for (TransportConnectionListener connectionListener : connectionListeners) {
+ connectionListener.onNodeConnected(node);
}
});
}
@@ -742,12 +771,7 @@ public class TransportService extends AbstractLifecycleComponent {
public void raiseNodeDisconnected(final DiscoveryNode node) {
try {
for (final TransportConnectionListener connectionListener : connectionListeners) {
- threadPool.generic().execute(new Runnable() {
- @Override
- public void run() {
- connectionListener.onNodeDisconnected(node);
- }
- });
+ threadPool.generic().execute(() -> connectionListener.onNodeDisconnected(node));
}
for (Map.Entry<Long, RequestHolder> entry : clientHandlers.entrySet()) {
RequestHolder holder = entry.getValue();
@@ -756,12 +780,8 @@ public class TransportService extends AbstractLifecycleComponent {
if (holderToNotify != null) {
// callback that an exception happened, but on a different thread since we don't
// want handlers to worry about stack overflows
- threadPool.generic().execute(new Runnable() {
- @Override
- public void run() {
- holderToNotify.handler().handleException(new NodeDisconnectedException(node, holderToNotify.action()));
- }
- });
+ threadPool.generic().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException(node,
+ holderToNotify.action())));
}
}
}
@@ -941,14 +961,14 @@ public class TransportService extends AbstractLifecycleComponent {
}
static class DirectResponseChannel implements TransportChannel {
- final ESLogger logger;
+ final Logger logger;
final DiscoveryNode localNode;
private final String action;
private final long requestId;
final TransportServiceAdapter adapter;
final ThreadPool threadPool;
- public DirectResponseChannel(ESLogger logger, DiscoveryNode localNode, String action, long requestId,
+ public DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId,
TransportServiceAdapter adapter, ThreadPool threadPool) {
this.logger = logger;
this.localNode = localNode;
@@ -1034,7 +1054,9 @@ public class TransportService extends AbstractLifecycleComponent {
try {
handler.handleException(rtx);
} catch (Exception e) {
- logger.error("failed to handle exception for action [{}], handler [{}]", e, action, handler);
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to handle exception for action [{}], handler [{}]", action, handler), e);
}
}
@@ -1047,6 +1069,5 @@ public class TransportService extends AbstractLifecycleComponent {
public String getChannelType() {
return "direct";
}
-
}
}
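Most of the logging changes in this file are one mechanical migration: the old ESLogger overloads took the Throwable before the format arguments, while the Log4j 2 overloads take a message Supplier plus a trailing Throwable, so the message is only built if the level is enabled. In isolation the pattern looks like this (a standalone sketch; the method and argument names are illustrative):

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LoggingMigrationSketch {
    static void warnWithException(Logger logger, String action, Exception e) {
        // Before (ESLogger): logger.warn("failed for action [{}]", e, action);
        // After (Log4j 2): the cast selects the (Supplier<?>, Throwable) overload,
        // and the ParameterizedMessage is constructed lazily.
        logger.warn(
            (Supplier<?>) () -> new ParameterizedMessage("failed for action [{}]", action), e);
    }
}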
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportStats.java b/core/src/main/java/org/elasticsearch/transport/TransportStats.java
index e34197fd73..78e692939b 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportStats.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportStats.java
@@ -21,24 +21,20 @@ package org.elasticsearch.transport;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
-public class TransportStats implements Streamable, ToXContent {
+public class TransportStats implements Writeable, ToXContent {
- private long serverOpen;
- private long rxCount;
- private long rxSize;
- private long txCount;
- private long txSize;
-
- TransportStats() {
-
- }
+ private final long serverOpen;
+ private final long rxCount;
+ private final long rxSize;
+ private final long txCount;
+ private final long txSize;
public TransportStats(long serverOpen, long rxCount, long rxSize, long txCount, long txSize) {
this.serverOpen = serverOpen;
@@ -48,6 +44,23 @@ public class TransportStats implements Streamable, ToXContent {
this.txSize = txSize;
}
+ public TransportStats(StreamInput in) throws IOException {
+ serverOpen = in.readVLong();
+ rxCount = in.readVLong();
+ rxSize = in.readVLong();
+ txCount = in.readVLong();
+ txSize = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(serverOpen);
+ out.writeVLong(rxCount);
+ out.writeVLong(rxSize);
+ out.writeVLong(txCount);
+ out.writeVLong(txSize);
+ }
+
public long serverOpen() {
return this.serverOpen;
}
@@ -88,30 +101,6 @@ public class TransportStats implements Streamable, ToXContent {
return txSize();
}
- public static TransportStats readTransportStats(StreamInput in) throws IOException {
- TransportStats stats = new TransportStats();
- stats.readFrom(in);
- return stats;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- serverOpen = in.readVLong();
- rxCount = in.readVLong();
- rxSize = in.readVLong();
- txCount = in.readVLong();
- txSize = in.readVLong();
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVLong(serverOpen);
- out.writeVLong(rxCount);
- out.writeVLong(rxSize);
- out.writeVLong(txCount);
- out.writeVLong(txSize);
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.TRANSPORT);
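The TransportStats rewrite above follows the ongoing Streamable-to-Writeable migration: mutable fields plus a readFrom method become final fields plus a deserializing constructor, and writeTo must mirror the read order exactly. A minimal sketch of the same shape, using a hypothetical ExampleStats class:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Illustrative Writeable: immutable state, symmetric read/write order.
public class ExampleStats implements Writeable {
    private final long count;

    public ExampleStats(long count) {
        this.count = count;
    }

    // The deserializing constructor replaces Streamable#readFrom.
    public ExampleStats(StreamInput in) throws IOException {
        count = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(count); // must mirror the read above
    }
}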
diff --git a/core/src/main/java/org/elasticsearch/transport/Transports.java b/core/src/main/java/org/elasticsearch/transport/Transports.java
index 15bf2c833f..9b4dc4d5a6 100644
--- a/core/src/main/java/org/elasticsearch/transport/Transports.java
+++ b/core/src/main/java/org/elasticsearch/transport/Transports.java
@@ -19,6 +19,7 @@
package org.elasticsearch.transport;
+import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.transport.local.LocalTransport;
import java.util.Arrays;
@@ -38,8 +39,10 @@ public enum Transports {
final String threadName = t.getName();
for (String s : Arrays.asList(
LocalTransport.LOCAL_TRANSPORT_THREAD_NAME_PREFIX,
- TcpTransport.HTTP_SERVER_BOSS_THREAD_NAME_PREFIX,
- TcpTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX,
+ HttpServerTransport.HTTP_SERVER_BOSS_THREAD_NAME_PREFIX,
+ HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX,
+ TcpTransport.TRANSPORT_SERVER_BOSS_THREAD_NAME_PREFIX,
+ TcpTransport.TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX,
TcpTransport.TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX,
TcpTransport.TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX,
TEST_MOCK_TRANSPORT_THREAD_PREFIX)) {
diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
index 61559442ff..f65312391d 100644
--- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
@@ -19,6 +19,8 @@
package org.elasticsearch.transport.local;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -27,7 +29,6 @@ import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.Lifecycle;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -71,9 +72,6 @@ import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
-/**
- *
- */
public class LocalTransport extends AbstractLifecycleComponent implements Transport {
public static final String LOCAL_TRANSPORT_THREAD_NAME_PREFIX = "local_transport";
@@ -92,7 +90,6 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
public static final String TRANSPORT_LOCAL_WORKERS = "transport.local.workers";
public static final String TRANSPORT_LOCAL_QUEUE = "transport.local.queue";
- @Inject
public LocalTransport(Settings settings, ThreadPool threadPool,
NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
super(settings);
@@ -306,7 +303,7 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
});
}
} else {
- logger.warn("Failed to receive message for action [{}]", e, action);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to receive message for action [{}]", action), e);
}
}
}
@@ -355,7 +352,9 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
transportChannel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("Failed to send error message back to client for action [{}]", inner, action);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Failed to send error message back to client for action [{}]", action), inner);
}
}
}
@@ -366,7 +365,9 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
transportChannel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("Failed to send error message back to client for action [{}]", inner, action);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Failed to send error message back to client for action [{}]", action), inner);
}
}
@@ -414,7 +415,7 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
try {
handler.handleException(rtx);
} catch (Exception e) {
- logger.error("failed to handle exception response [{}]", e, handler);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
}
}
diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java b/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java
index 02957ab99a..d9520aef76 100644
--- a/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java
+++ b/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java
@@ -24,13 +24,13 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
-import java.util.Collections;
+import java.util.Collection;
/**
* An internal node that connects to a remote cluster, as part of a tribe node.
*/
class TribeClientNode extends Node {
- TribeClientNode(Settings settings) {
- super(new Environment(settings), Collections.<Class<? extends Plugin>>emptyList());
+ TribeClientNode(Settings settings, Collection<Class<? extends Plugin>> classpathPlugins) {
+ super(new Environment(settings), classpathPlugins);
}
}
diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java
index 3ca8015527..fd697340cd 100644
--- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java
+++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java
@@ -19,6 +19,8 @@
package org.elasticsearch.tribe;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
@@ -56,10 +58,12 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.TransportSettings;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
@@ -180,7 +184,8 @@ public class TribeService extends AbstractLifecycleComponent {
private final List<Node> nodes = new CopyOnWriteArrayList<>();
- public TribeService(Settings settings, ClusterService clusterService, final String tribeNodeId) {
+ public TribeService(Settings settings, ClusterService clusterService, final String tribeNodeId,
+ Collection<Class<? extends Plugin>> classpathPlugins) {
super(settings);
this.clusterService = clusterService;
Map<String, Settings> nodesSettings = new HashMap<>(settings.getGroups("tribe", true));
@@ -188,7 +193,7 @@ public class TribeService extends AbstractLifecycleComponent {
nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client
for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) {
Settings clientSettings = buildClientSettings(entry.getKey(), tribeNodeId, settings, entry.getValue());
- nodes.add(new TribeClientNode(clientSettings));
+ nodes.add(new TribeClientNode(clientSettings, classpathPlugins));
}
this.blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
@@ -274,7 +279,7 @@ public class TribeService extends AbstractLifecycleComponent {
otherNode.close();
} catch (Exception inner) {
inner.addSuppressed(e);
- logger.warn("failed to close node {} on failed start", inner, otherNode);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to close node {} on failed start", otherNode), inner);
}
}
if (e instanceof RuntimeException) {
@@ -296,7 +301,7 @@ public class TribeService extends AbstractLifecycleComponent {
try {
node.close();
} catch (Exception e) {
- logger.warn("failed to close node {}", e, node);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to close node {}", node), e);
}
}
}
@@ -320,7 +325,7 @@ public class TribeService extends AbstractLifecycleComponent {
event,
ClusterStateTaskConfig.build(Priority.NORMAL),
executor,
- (source, e) -> logger.warn("failed to process [{}]", e, source));
+ (source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to process [{}]", source), e));
}
}
diff --git a/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java b/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java
index a6b0bdd840..8d5c04e770 100644
--- a/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java
+++ b/core/src/main/java/org/elasticsearch/watcher/FileWatcher.java
@@ -18,8 +18,8 @@
*/
package org.elasticsearch.watcher;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@@ -38,7 +38,7 @@ public class FileWatcher extends AbstractResourceWatcher<FileChangesListener> {
private FileObserver rootFileObserver;
private Path file;
- private static final ESLogger logger = Loggers.getLogger(FileWatcher.class);
+ private static final Logger logger = Loggers.getLogger(FileWatcher.class);
/**
* Creates new file watcher on the given directory
diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy
index 2f83d56543..97ccfb31bf 100644
--- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy
+++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy
@@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" {
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.
-grant codeBase "${codebase.lucene-core-6.1.0.jar}" {
+grant codeBase "${codebase.lucene-core-6.2.0.jar}" {
// needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
// java 8 package
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
@@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.1.0.jar}" {
permission java.lang.RuntimePermission "accessDeclaredMembers";
};
-grant codeBase "${codebase.lucene-misc-6.1.0.jar}" {
+grant codeBase "${codebase.lucene-misc-6.2.0.jar}" {
// needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
permission java.nio.file.LinkPermission "hard";
};
diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
index f2c60923f6..43f6b62c3c 100644
--- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
+++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
@@ -33,11 +33,13 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};
-grant codeBase "${codebase.lucene-test-framework-6.1.0.jar}" {
+grant codeBase "${codebase.lucene-test-framework-6.2.0.jar}" {
// needed by RamUsageTester
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
// needed for testing hardlinks in StoreRecoveryTests since we install MockFS
permission java.nio.file.LinkPermission "hard";
+ // also needed by RamUsageTester
+ permission java.lang.RuntimePermission "accessDeclaredMembers";
};
grant codeBase "${codebase.randomizedtesting-runner-2.3.2.jar}" {
diff --git a/core/src/test/java/org/apache/log4j/Java9HackTests.java b/core/src/test/java/org/apache/log4j/Java9HackTests.java
deleted file mode 100644
index e917f1d306..0000000000
--- a/core/src/test/java/org/apache/log4j/Java9HackTests.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.log4j;
-
-import org.elasticsearch.test.ESTestCase;
-
-public class Java9HackTests extends ESTestCase {
- public void testJava9Hack() {
- assertNotNull(MDC.mdc.tlm != null);
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
index a3b0629e8a..0611d706ac 100644
--- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
+++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
@@ -757,7 +757,7 @@ public class ExceptionSerializationTests extends ESTestCase {
ids.put(107, org.elasticsearch.repositories.RepositoryMissingException.class);
ids.put(108, null);
ids.put(109, org.elasticsearch.index.engine.DocumentSourceMissingException.class);
- ids.put(110, org.elasticsearch.index.engine.FlushNotAllowedEngineException.class);
+ ids.put(110, null); // FlushNotAllowedEngineException was removed in 5.0
ids.put(111, org.elasticsearch.common.settings.NoClassSettingsException.class);
ids.put(112, org.elasticsearch.transport.BindTransportException.class);
ids.put(113, org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException.class);
diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java
index 934fdae254..d1d01610f1 100644
--- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java
+++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java
@@ -69,6 +69,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.replication.TransportReplicationActionTests;
import org.elasticsearch.action.termvectors.MultiTermVectorsAction;
import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsAction;
@@ -77,25 +78,23 @@ import org.elasticsearch.action.update.UpdateAction;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.script.MockScriptPlugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.action.SearchTransportService;
+import org.elasticsearch.action.search.SearchTransportService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
-import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.Before;
@@ -109,7 +108,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
-import java.util.function.Supplier;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
@@ -117,7 +115,6 @@ import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasItem;
-import static org.hamcrest.Matchers.instanceOf;
@ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2)
public class IndicesRequestIT extends ESIntegTestCase {
@@ -143,8 +140,7 @@ public class IndicesRequestIT extends ESIntegTestCase {
return Settings.builder().put(super.nodeSettings(ordinal))
// InternalClusterInfoService sends IndicesStatsRequest periodically which messes with this test
// this setting disables it...
- .put("cluster.routing.allocation.disk.threshold_enabled", false)
- .put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build();
+ .put("cluster.routing.allocation.disk.threshold_enabled", false).build();
}
@Override
@@ -638,8 +634,7 @@ public class IndicesRequestIT extends ESIntegTestCase {
assertThat("no internal requests intercepted for action [" + action + "]", requests.size(), greaterThan(0));
}
for (TransportRequest internalRequest : requests) {
- assertThat(internalRequest, instanceOf(IndicesRequest.class));
- IndicesRequest indicesRequest = (IndicesRequest) internalRequest;
+ IndicesRequest indicesRequest = convertRequest(internalRequest);
assertThat(internalRequest.getClass().getName(), indicesRequest.indices(), equalTo(originalRequest.indices()));
assertThat(indicesRequest.indicesOptions(), equalTo(originalRequest.indicesOptions()));
}
@@ -651,14 +646,24 @@ public class IndicesRequestIT extends ESIntegTestCase {
List<TransportRequest> requests = consumeTransportRequests(action);
assertThat("no internal requests intercepted for action [" + action + "]", requests.size(), greaterThan(0));
for (TransportRequest internalRequest : requests) {
- assertThat(internalRequest, instanceOf(IndicesRequest.class));
- for (String index : ((IndicesRequest) internalRequest).indices()) {
+ IndicesRequest indicesRequest = convertRequest(internalRequest);
+ for (String index : indicesRequest.indices()) {
assertThat(indices, hasItem(index));
}
}
}
}
+ static IndicesRequest convertRequest(TransportRequest request) {
+ final IndicesRequest indicesRequest;
+ if (request instanceof IndicesRequest) {
+ indicesRequest = (IndicesRequest) request;
+ } else {
+ indicesRequest = TransportReplicationActionTests.resolveRequest(request);
+ }
+ return indicesRequest;
+ }
+
private String randomIndexOrAlias() {
String index = randomFrom(indices);
if (randomBoolean()) {
@@ -692,31 +697,39 @@ public class IndicesRequestIT extends ESIntegTestCase {
}
private static void assertAllRequestsHaveBeenConsumed() {
- Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
- for (TransportService transportService : transportServices) {
- assertThat(((InterceptingTransportService)transportService).requests.entrySet(), emptyIterable());
+ Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
+ for (PluginsService pluginsService : pluginsServices) {
+ Set<Map.Entry<String, List<TransportRequest>>> entries =
+ pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get()
+ .instance.requests.entrySet();
+ assertThat(entries, emptyIterable());
+
}
}
private static void clearInterceptedActions() {
- Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
- for (TransportService transportService : transportServices) {
- ((InterceptingTransportService) transportService).clearInterceptedActions();
+ Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
+ for (PluginsService pluginsService : pluginsServices) {
+ pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get()
+ .instance.clearInterceptedActions();
}
}
private static void interceptTransportActions(String... actions) {
- Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
- for (TransportService transportService : transportServices) {
- ((InterceptingTransportService) transportService).interceptTransportActions(actions);
+ Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
+ for (PluginsService pluginsService : pluginsServices) {
+ pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get()
+ .instance.interceptTransportActions(actions);
}
}
private static List<TransportRequest> consumeTransportRequests(String action) {
List<TransportRequest> requests = new ArrayList<>();
- Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
- for (TransportService transportService : transportServices) {
- List<TransportRequest> transportRequests = ((InterceptingTransportService) transportService).consumeRequests(action);
+
+ Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
+ for (PluginsService pluginsService : pluginsServices) {
+ List<TransportRequest> transportRequests = pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class)
+ .stream().findFirst().get().instance.consumeRequests(action);
if (transportRequests != null) {
requests.addAll(transportRequests);
}
@@ -724,12 +737,13 @@ public class IndicesRequestIT extends ESIntegTestCase {
return requests;
}
- public static class InterceptingTransportService extends TransportService {
-
- public static class TestPlugin extends Plugin {
+ public static class InterceptingTransportService implements TransportInterceptor {
- public void onModule(NetworkModule module) {
- module.registerTransportService("intercepting", InterceptingTransportService.class);
+ public static class TestPlugin extends Plugin implements NetworkPlugin {
+ public final InterceptingTransportService instance = new InterceptingTransportService();
+ @Override
+ public List<TransportInterceptor> getTransportInterceptors() {
+ return Collections.singletonList(instance);
}
}
@@ -737,9 +751,10 @@ public class IndicesRequestIT extends ESIntegTestCase {
private final Map<String, List<TransportRequest>> requests = new HashMap<>();
- @Inject
- public InterceptingTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
- super(settings, transport, threadPool);
+ @Override
+ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action,
+ TransportRequestHandler<T> actualHandler) {
+ return new InterceptingRequestHandler<>(action, actualHandler);
}
synchronized List<TransportRequest> consumeRequests(String action) {
@@ -754,19 +769,6 @@ public class IndicesRequestIT extends ESIntegTestCase {
actions.clear();
}
- @Override
- public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request, String executor,
- boolean forceExecution, boolean canTripCircuitBreaker,
- TransportRequestHandler<Request> handler) {
- super.registerRequestHandler(action, request, executor, forceExecution, canTripCircuitBreaker, new
- InterceptingRequestHandler<>(action, handler));
- }
-
- @Override
- public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> requestFactory, String
- executor, TransportRequestHandler<Request> handler) {
- super.registerRequestHandler(action, requestFactory, executor, new InterceptingRequestHandler<>(action, handler));
- }
private class InterceptingRequestHandler<T extends TransportRequest> implements TransportRequestHandler<T> {
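Because registerRequestHandler is now final, the test can no longer subclass TransportService to observe requests; it plugs in through the new NetworkPlugin hook instead. Reduced to its essentials, the wiring looks like this (a sketch; the MyInterceptorPlugin name and the no-op interceptor body are illustrative):

import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.TransportInterceptor;

import java.util.Collections;
import java.util.List;

// Illustrative plugin: exposes one interceptor to the transport layer.
public class MyInterceptorPlugin extends Plugin implements NetworkPlugin {
    // All TransportInterceptor methods have defaults, so an empty body is a no-op;
    // a real plugin would override interceptHandler and/or interceptSender.
    private final TransportInterceptor interceptor = new TransportInterceptor() {
    };

    @Override
    public List<TransportInterceptor> getTransportInterceptors() {
        return Collections.singletonList(interceptor);
    }
}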
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java
index 3b26345148..97c1a20c33 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java
@@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
import java.util.List;
import java.util.Map;
@@ -43,6 +44,7 @@ import static org.hamcrest.Matchers.greaterThan;
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public final class ClusterAllocationExplainIT extends ESIntegTestCase {
+ @TestLogging("_root:DEBUG")
public void testDelayShards() throws Exception {
logger.info("--> starting 3 nodes");
List<String> nodes = internalCluster().startNodesAsync(3).get();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
index 425edeb106..577f73a89e 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
@@ -25,6 +25,8 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@@ -55,13 +57,13 @@ import static java.util.Collections.emptySet;
public final class ClusterAllocationExplanationTests extends ESTestCase {
private Index i = new Index("foo", "uuid");
- private ShardRouting primaryShard = ShardRouting.newUnassigned(new ShardId(i, 0), null, true,
- new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ private ShardRouting primaryShard = ShardRouting.newUnassigned(new ShardId(i, 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE,
+ new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
private IndexMetaData indexMetaData = IndexMetaData.builder("foo")
.settings(Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, "uuid"))
- .putActiveAllocationIds(0, Sets.newHashSet("aid1", "aid2"))
+ .putInSyncAllocationIds(0, Sets.newHashSet("aid1", "aid2"))
.numberOfShards(1)
.numberOfReplicas(1)
.build();
@@ -87,16 +89,14 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
Float nodeWeight = randomFloat();
Set<String> activeAllocationIds = new HashSet<>();
activeAllocationIds.add("eggplant");
- ShardRouting primaryStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), null, true,
+ ShardRouting primaryStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));
- assertTrue(primaryStartedShard.allocatedPostIndexCreate(indexMetaData));
- ShardRouting replicaStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), null, false,
+ ShardRouting replicaStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), false, PeerRecoverySource.INSTANCE,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));
- assertTrue(replicaStartedShard.allocatedPostIndexCreate(indexMetaData));
IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, e);
- NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
+ NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node,
yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the copy of the shard cannot be read",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.IO_ERROR);
@@ -125,8 +125,8 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, corruptE);
- ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
- storeStatus, "", activeAllocationIds, false);
+ ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
+ nodeWeight, storeStatus, "", activeAllocationIds, false);
assertExplanations(ne, "the copy of the shard is corrupt",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.CORRUPT);
@@ -169,8 +169,7 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null);
ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(replicaStartedShard, indexMetaData, node, noDecision,
nodeWeight, storeStatus, "", activeAllocationIds, true);
- assertExplanations(ne, "the shard cannot be assigned because allocation deciders return a NO " +
- "decision and the shard's state is still being fetched",
+ assertExplanations(ne, "the shard cannot be assigned because allocation deciders return a NO decision",
ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
new file mode 100644
index 0000000000..04905de18c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -0,0 +1,364 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.stats;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.discovery.DiscoveryStats;
+import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
+import org.elasticsearch.http.HttpStats;
+import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
+import org.elasticsearch.indices.breaker.CircuitBreakerStats;
+import org.elasticsearch.ingest.IngestStats;
+import org.elasticsearch.monitor.fs.FsInfo;
+import org.elasticsearch.monitor.jvm.JvmStats;
+import org.elasticsearch.monitor.os.OsStats;
+import org.elasticsearch.monitor.process.ProcessStats;
+import org.elasticsearch.script.ScriptStats;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.threadpool.ThreadPoolStats;
+import org.elasticsearch.transport.TransportStats;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.emptySet;
+
+public class NodeStatsTests extends ESTestCase {
+
+ public void testSerialization() throws IOException {
+ NodeStats nodeStats = createNodeStats();
+ try (BytesStreamOutput out = new BytesStreamOutput()) {
+ nodeStats.writeTo(out);
+ try (StreamInput in = out.bytes().streamInput()) {
+ NodeStats deserializedNodeStats = NodeStats.readNodeStats(in);
+ assertEquals(nodeStats.getNode(), deserializedNodeStats.getNode());
+ assertEquals(nodeStats.getTimestamp(), deserializedNodeStats.getTimestamp());
+ if (nodeStats.getOs() == null) {
+ assertNull(deserializedNodeStats.getOs());
+ } else {
+ assertEquals(nodeStats.getOs().getTimestamp(), deserializedNodeStats.getOs().getTimestamp());
+ assertEquals(nodeStats.getOs().getSwap().getFree(), deserializedNodeStats.getOs().getSwap().getFree());
+ assertEquals(nodeStats.getOs().getSwap().getTotal(), deserializedNodeStats.getOs().getSwap().getTotal());
+ assertEquals(nodeStats.getOs().getSwap().getUsed(), deserializedNodeStats.getOs().getSwap().getUsed());
+ assertEquals(nodeStats.getOs().getMem().getFree(), deserializedNodeStats.getOs().getMem().getFree());
+ assertEquals(nodeStats.getOs().getMem().getTotal(), deserializedNodeStats.getOs().getMem().getTotal());
+ assertEquals(nodeStats.getOs().getMem().getUsed(), deserializedNodeStats.getOs().getMem().getUsed());
+ assertEquals(nodeStats.getOs().getMem().getFreePercent(), deserializedNodeStats.getOs().getMem().getFreePercent());
+ assertEquals(nodeStats.getOs().getMem().getUsedPercent(), deserializedNodeStats.getOs().getMem().getUsedPercent());
+ assertEquals(nodeStats.getOs().getCpu().getPercent(), deserializedNodeStats.getOs().getCpu().getPercent());
+ assertArrayEquals(nodeStats.getOs().getCpu().getLoadAverage(),
+ deserializedNodeStats.getOs().getCpu().getLoadAverage(), 0);
+ }
+ if (nodeStats.getProcess() == null) {
+ assertNull(deserializedNodeStats.getProcess());
+ } else {
+ assertEquals(nodeStats.getProcess().getTimestamp(), deserializedNodeStats.getProcess().getTimestamp());
+ assertEquals(nodeStats.getProcess().getCpu().getTotal(), deserializedNodeStats.getProcess().getCpu().getTotal());
+ assertEquals(nodeStats.getProcess().getCpu().getPercent(), deserializedNodeStats.getProcess().getCpu().getPercent());
+ assertEquals(nodeStats.getProcess().getMem().getTotalVirtual(),
+ deserializedNodeStats.getProcess().getMem().getTotalVirtual());
+ assertEquals(nodeStats.getProcess().getMaxFileDescriptors(),
+ deserializedNodeStats.getProcess().getMaxFileDescriptors());
+ assertEquals(nodeStats.getProcess().getOpenFileDescriptors(),
+ deserializedNodeStats.getProcess().getOpenFileDescriptors());
+ }
+ JvmStats jvm = nodeStats.getJvm();
+ JvmStats deserializedJvm = deserializedNodeStats.getJvm();
+ if (jvm == null) {
+ assertNull(deserializedJvm);
+ } else {
+ JvmStats.Mem mem = jvm.getMem();
+ JvmStats.Mem deserializedMem = deserializedJvm.getMem();
+ assertEquals(jvm.getTimestamp(), deserializedJvm.getTimestamp());
+ assertEquals(mem.getHeapUsedPercent(), deserializedMem.getHeapUsedPercent());
+ assertEquals(mem.getHeapUsed(), deserializedMem.getHeapUsed());
+ assertEquals(mem.getHeapCommitted(), deserializedMem.getHeapCommitted());
+ assertEquals(mem.getNonHeapCommitted(), deserializedMem.getNonHeapCommitted());
+ assertEquals(mem.getNonHeapUsed(), deserializedMem.getNonHeapUsed());
+ assertEquals(mem.getHeapMax(), deserializedMem.getHeapMax());
+ JvmStats.Classes classes = jvm.getClasses();
+ assertEquals(classes.getLoadedClassCount(), deserializedJvm.getClasses().getLoadedClassCount());
+ assertEquals(classes.getTotalLoadedClassCount(), deserializedJvm.getClasses().getTotalLoadedClassCount());
+ assertEquals(classes.getUnloadedClassCount(), deserializedJvm.getClasses().getUnloadedClassCount());
+ assertEquals(jvm.getGc().getCollectors().length, deserializedJvm.getGc().getCollectors().length);
+ for (int i = 0; i < jvm.getGc().getCollectors().length; i++) {
+ JvmStats.GarbageCollector garbageCollector = jvm.getGc().getCollectors()[i];
+ JvmStats.GarbageCollector deserializedGarbageCollector = deserializedJvm.getGc().getCollectors()[i];
+ assertEquals(garbageCollector.getName(), deserializedGarbageCollector.getName());
+ assertEquals(garbageCollector.getCollectionCount(), deserializedGarbageCollector.getCollectionCount());
+ assertEquals(garbageCollector.getCollectionTime(), deserializedGarbageCollector.getCollectionTime());
+ }
+ assertEquals(jvm.getThreads().getCount(), deserializedJvm.getThreads().getCount());
+ assertEquals(jvm.getThreads().getPeakCount(), deserializedJvm.getThreads().getPeakCount());
+ assertEquals(jvm.getUptime(), deserializedJvm.getUptime());
+ if (jvm.getBufferPools() == null) {
+ assertNull(deserializedJvm.getBufferPools());
+ } else {
+ assertEquals(jvm.getBufferPools().size(), deserializedJvm.getBufferPools().size());
+ for (int i = 0; i < jvm.getBufferPools().size(); i++) {
+ JvmStats.BufferPool bufferPool = jvm.getBufferPools().get(i);
+ JvmStats.BufferPool deserializedBufferPool = deserializedJvm.getBufferPools().get(i);
+ assertEquals(bufferPool.getName(), deserializedBufferPool.getName());
+ assertEquals(bufferPool.getCount(), deserializedBufferPool.getCount());
+ assertEquals(bufferPool.getTotalCapacity(), deserializedBufferPool.getTotalCapacity());
+ assertEquals(bufferPool.getUsed(), deserializedBufferPool.getUsed());
+ }
+ }
+ }
+ if (nodeStats.getThreadPool() == null) {
+ assertNull(deserializedNodeStats.getThreadPool());
+ } else {
+ Iterator<ThreadPoolStats.Stats> threadPoolIterator = nodeStats.getThreadPool().iterator();
+ Iterator<ThreadPoolStats.Stats> deserializedThreadPoolIterator = deserializedNodeStats.getThreadPool().iterator();
+ while (threadPoolIterator.hasNext()) {
+ ThreadPoolStats.Stats stats = threadPoolIterator.next();
+ ThreadPoolStats.Stats deserializedStats = deserializedThreadPoolIterator.next();
+ assertEquals(stats.getName(), deserializedStats.getName());
+ assertEquals(stats.getThreads(), deserializedStats.getThreads());
+ assertEquals(stats.getActive(), deserializedStats.getActive());
+ assertEquals(stats.getLargest(), deserializedStats.getLargest());
+ assertEquals(stats.getCompleted(), deserializedStats.getCompleted());
+ assertEquals(stats.getQueue(), deserializedStats.getQueue());
+ assertEquals(stats.getRejected(), deserializedStats.getRejected());
+ }
+ }
+ FsInfo fs = nodeStats.getFs();
+ FsInfo deserializedFs = deserializedNodeStats.getFs();
+ if (fs == null) {
+ assertNull(deserializedFs);
+ } else {
+ assertEquals(fs.getTimestamp(), deserializedFs.getTimestamp());
+ assertEquals(fs.getTotal().getAvailable(), deserializedFs.getTotal().getAvailable());
+ assertEquals(fs.getTotal().getTotal(), deserializedFs.getTotal().getTotal());
+ assertEquals(fs.getTotal().getFree(), deserializedFs.getTotal().getFree());
+ assertEquals(fs.getTotal().getMount(), deserializedFs.getTotal().getMount());
+ assertEquals(fs.getTotal().getPath(), deserializedFs.getTotal().getPath());
+ assertEquals(fs.getTotal().getSpins(), deserializedFs.getTotal().getSpins());
+ assertEquals(fs.getTotal().getType(), deserializedFs.getTotal().getType());
+ FsInfo.IoStats ioStats = fs.getIoStats();
+ FsInfo.IoStats deserializedIoStats = deserializedFs.getIoStats();
+ assertEquals(ioStats.getTotalOperations(), deserializedIoStats.getTotalOperations());
+ assertEquals(ioStats.getTotalReadKilobytes(), deserializedIoStats.getTotalReadKilobytes());
+ assertEquals(ioStats.getTotalReadOperations(), deserializedIoStats.getTotalReadOperations());
+ assertEquals(ioStats.getTotalWriteKilobytes(), deserializedIoStats.getTotalWriteKilobytes());
+ assertEquals(ioStats.getTotalWriteOperations(), deserializedIoStats.getTotalWriteOperations());
+ assertEquals(ioStats.getDevicesStats().length, deserializedIoStats.getDevicesStats().length);
+ for (int i = 0; i < ioStats.getDevicesStats().length; i++) {
+ FsInfo.DeviceStats deviceStats = ioStats.getDevicesStats()[i];
+ FsInfo.DeviceStats deserializedDeviceStats = deserializedIoStats.getDevicesStats()[i];
+ assertEquals(deviceStats.operations(), deserializedDeviceStats.operations());
+ assertEquals(deviceStats.readKilobytes(), deserializedDeviceStats.readKilobytes());
+ assertEquals(deviceStats.readOperations(), deserializedDeviceStats.readOperations());
+ assertEquals(deviceStats.writeKilobytes(), deserializedDeviceStats.writeKilobytes());
+ assertEquals(deviceStats.writeOperations(), deserializedDeviceStats.writeOperations());
+ }
+ }
+ if (nodeStats.getTransport() == null) {
+ assertNull(deserializedNodeStats.getTransport());
+ } else {
+ assertEquals(nodeStats.getTransport().getRxCount(), deserializedNodeStats.getTransport().getRxCount());
+ assertEquals(nodeStats.getTransport().getRxSize(), deserializedNodeStats.getTransport().getRxSize());
+ assertEquals(nodeStats.getTransport().getServerOpen(), deserializedNodeStats.getTransport().getServerOpen());
+ assertEquals(nodeStats.getTransport().getTxCount(), deserializedNodeStats.getTransport().getTxCount());
+ assertEquals(nodeStats.getTransport().getTxSize(), deserializedNodeStats.getTransport().getTxSize());
+ }
+ if (nodeStats.getHttp() == null) {
+ assertNull(deserializedNodeStats.getHttp());
+ } else {
+ assertEquals(nodeStats.getHttp().getServerOpen(), deserializedNodeStats.getHttp().getServerOpen());
+ assertEquals(nodeStats.getHttp().getTotalOpen(), deserializedNodeStats.getHttp().getTotalOpen());
+ }
+ if (nodeStats.getBreaker() == null) {
+ assertNull(deserializedNodeStats.getBreaker());
+ } else {
+ assertEquals(nodeStats.getBreaker().getAllStats().length, deserializedNodeStats.getBreaker().getAllStats().length);
+ for (int i = 0; i < nodeStats.getBreaker().getAllStats().length; i++) {
+ CircuitBreakerStats circuitBreakerStats = nodeStats.getBreaker().getAllStats()[i];
+ CircuitBreakerStats deserializedCircuitBreakerStats = deserializedNodeStats.getBreaker().getAllStats()[i];
+ assertEquals(circuitBreakerStats.getEstimated(), deserializedCircuitBreakerStats.getEstimated());
+ assertEquals(circuitBreakerStats.getLimit(), deserializedCircuitBreakerStats.getLimit());
+ assertEquals(circuitBreakerStats.getName(), deserializedCircuitBreakerStats.getName());
+ assertEquals(circuitBreakerStats.getOverhead(), deserializedCircuitBreakerStats.getOverhead(), 0);
+ assertEquals(circuitBreakerStats.getTrippedCount(), deserializedCircuitBreakerStats.getTrippedCount(), 0);
+ }
+ }
+ ScriptStats scriptStats = nodeStats.getScriptStats();
+ if (scriptStats == null) {
+ assertNull(deserializedNodeStats.getScriptStats());
+ } else {
+ assertEquals(scriptStats.getCacheEvictions(), deserializedNodeStats.getScriptStats().getCacheEvictions());
+ assertEquals(scriptStats.getCompilations(), deserializedNodeStats.getScriptStats().getCompilations());
+ }
+ DiscoveryStats discoveryStats = nodeStats.getDiscoveryStats();
+ DiscoveryStats deserializedDiscoveryStats = deserializedNodeStats.getDiscoveryStats();
+ if (discoveryStats == null) {
+ assertNull(deserializedDiscoveryStats);
+ } else {
+ PendingClusterStateStats queueStats = discoveryStats.getQueueStats();
+ if (queueStats == null) {
+ assertNull(deserializedDiscoveryStats.getQueueStats());
+ } else {
+ assertEquals(queueStats.getCommitted(), deserializedDiscoveryStats.getQueueStats().getCommitted());
+ assertEquals(queueStats.getTotal(), deserializedDiscoveryStats.getQueueStats().getTotal());
+ assertEquals(queueStats.getPending(), deserializedDiscoveryStats.getQueueStats().getPending());
+ }
+ }
+ IngestStats ingestStats = nodeStats.getIngestStats();
+ IngestStats deserializedIngestStats = deserializedNodeStats.getIngestStats();
+ if (ingestStats == null) {
+ assertNull(deserializedIngestStats);
+ } else {
+ IngestStats.Stats totalStats = ingestStats.getTotalStats();
+ assertEquals(totalStats.getIngestCount(), deserializedIngestStats.getTotalStats().getIngestCount());
+ assertEquals(totalStats.getIngestCurrent(), deserializedIngestStats.getTotalStats().getIngestCurrent());
+ assertEquals(totalStats.getIngestFailedCount(), deserializedIngestStats.getTotalStats().getIngestFailedCount());
+ assertEquals(totalStats.getIngestTimeInMillis(), deserializedIngestStats.getTotalStats().getIngestTimeInMillis());
+ assertEquals(ingestStats.getStatsPerPipeline().size(), deserializedIngestStats.getStatsPerPipeline().size());
+ for (Map.Entry<String, IngestStats.Stats> entry : ingestStats.getStatsPerPipeline().entrySet()) {
+ IngestStats.Stats stats = entry.getValue();
+ IngestStats.Stats deserializedStats = deserializedIngestStats.getStatsPerPipeline().get(entry.getKey());
+ assertEquals(stats.getIngestFailedCount(), deserializedStats.getIngestFailedCount());
+ assertEquals(stats.getIngestTimeInMillis(), deserializedStats.getIngestTimeInMillis());
+ assertEquals(stats.getIngestCurrent(), deserializedStats.getIngestCurrent());
+ assertEquals(stats.getIngestCount(), deserializedStats.getIngestCount());
+ }
+ }
+ }
+ }
+ }
+
+ private static NodeStats createNodeStats() {
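+ // Build a NodeStats whose sub-stats are each randomly populated or null, so the round-trip assertions above cover both paths.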
+ DiscoveryNode node = new DiscoveryNode("test_node", LocalTransportAddress.buildUnique(),
+ emptyMap(), emptySet(), VersionUtils.randomVersion(random()));
+ OsStats osStats = null;
+ if (frequently()) {
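+ // -1 stands in for an unavailable load average.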
+ double[] loadAverages = new double[3];
+ for (int i = 0; i < 3; i++) {
+ loadAverages[i] = randomBoolean() ? randomDouble() : -1;
+ }
+ osStats = new OsStats(System.currentTimeMillis(), new OsStats.Cpu(randomShort(), loadAverages),
+ new OsStats.Mem(randomLong(), randomLong()),
+ new OsStats.Swap(randomLong(), randomLong()));
+ }
+ ProcessStats processStats = frequently() ? new ProcessStats(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ new ProcessStats.Cpu(randomShort(), randomPositiveLong()),
+ new ProcessStats.Mem(randomPositiveLong())) : null;
+ JvmStats jvmStats = null;
+ if (frequently()) {
+ int numMemoryPools = randomIntBetween(0, 10);
+ List<JvmStats.MemoryPool> memoryPools = new ArrayList<>(numMemoryPools);
+ for (int i = 0; i < numMemoryPools; i++) {
+ memoryPools.add(new JvmStats.MemoryPool(randomAsciiOfLengthBetween(3, 10), randomPositiveLong(),
+ randomPositiveLong(), randomPositiveLong(), randomPositiveLong()));
+ }
+ JvmStats.Threads threads = new JvmStats.Threads(randomIntBetween(1, 1000), randomIntBetween(1, 1000));
+ int numGarbageCollectors = randomIntBetween(0, 10);
+ JvmStats.GarbageCollector[] garbageCollectorsArray = new JvmStats.GarbageCollector[numGarbageCollectors];
+ for (int i = 0; i < numGarbageCollectors; i++) {
+ garbageCollectorsArray[i] = new JvmStats.GarbageCollector(randomAsciiOfLengthBetween(3, 10),
+ randomPositiveLong(), randomPositiveLong());
+ }
+ JvmStats.GarbageCollectors garbageCollectors = new JvmStats.GarbageCollectors(garbageCollectorsArray);
+ int numBufferPools = randomIntBetween(0, 10);
+ List<JvmStats.BufferPool> bufferPoolList = new ArrayList<>();
+ for (int i = 0; i < numBufferPools; i++) {
+ bufferPoolList.add(new JvmStats.BufferPool(randomAsciiOfLengthBetween(3, 10), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong()));
+ }
+ JvmStats.Classes classes = new JvmStats.Classes(randomPositiveLong(), randomPositiveLong(), randomPositiveLong());
+ jvmStats = frequently() ? new JvmStats(randomPositiveLong(), randomPositiveLong(), new JvmStats.Mem(randomPositiveLong(),
+ randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), memoryPools), threads,
+ garbageCollectors, randomBoolean() ? Collections.emptyList() : bufferPoolList, classes) : null;
+ }
+ ThreadPoolStats threadPoolStats = null;
+ if (frequently()) {
+ int numThreadPoolStats = randomIntBetween(0, 10);
+ List<ThreadPoolStats.Stats> threadPoolStatsList = new ArrayList<>();
+ for (int i = 0; i < numThreadPoolStats; i++) {
+ threadPoolStatsList.add(new ThreadPoolStats.Stats(randomAsciiOfLengthBetween(3, 10), randomIntBetween(1, 1000),
+ randomIntBetween(1, 1000), randomIntBetween(1, 1000), randomPositiveLong(),
+ randomIntBetween(1, 1000), randomIntBetween(1, 1000)));
+ }
+ threadPoolStats = new ThreadPoolStats(threadPoolStatsList);
+ }
+ FsInfo fsInfo = null;
+ if (frequently()) {
+ int numDeviceStats = randomIntBetween(0, 10);
+ FsInfo.DeviceStats[] deviceStatsArray = new FsInfo.DeviceStats[numDeviceStats];
+ for (int i = 0; i < numDeviceStats; i++) {
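+ // Randomly supply a previous sample so stats computed relative to it are covered as well.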
+ FsInfo.DeviceStats previousDeviceStats = randomBoolean() ? null :
+ new FsInfo.DeviceStats(randomInt(), randomInt(), randomAsciiOfLengthBetween(3, 10),
+ randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), null);
+ deviceStatsArray[i] = new FsInfo.DeviceStats(randomInt(), randomInt(), randomAsciiOfLengthBetween(3, 10),
+ randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), previousDeviceStats);
+ }
+ FsInfo.IoStats ioStats = new FsInfo.IoStats(deviceStatsArray);
+ int numPaths = randomIntBetween(0, 10);
+ FsInfo.Path[] paths = new FsInfo.Path[numPaths];
+ for (int i = 0; i < numPaths; i++) {
+ paths[i] = new FsInfo.Path(randomAsciiOfLengthBetween(3, 10), randomBoolean() ? randomAsciiOfLengthBetween(3, 10) : null,
+ randomPositiveLong(), randomPositiveLong(), randomPositiveLong());
+ }
+ fsInfo = new FsInfo(randomPositiveLong(), ioStats, paths);
+ }
+ TransportStats transportStats = frequently() ? new TransportStats(randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong(), randomPositiveLong(), randomPositiveLong()) : null;
+ HttpStats httpStats = frequently() ? new HttpStats(randomPositiveLong(), randomPositiveLong()) : null;
+ AllCircuitBreakerStats allCircuitBreakerStats = null;
+ if (frequently()) {
+ int numCircuitBreakerStats = randomIntBetween(0, 10);
+ CircuitBreakerStats[] circuitBreakerStatsArray = new CircuitBreakerStats[numCircuitBreakerStats];
+ for (int i = 0; i < numCircuitBreakerStats; i++) {
+ circuitBreakerStatsArray[i] = new CircuitBreakerStats(randomAsciiOfLengthBetween(3, 10), randomPositiveLong(),
+ randomPositiveLong(), randomDouble(), randomPositiveLong());
+ }
+ allCircuitBreakerStats = new AllCircuitBreakerStats(circuitBreakerStatsArray);
+ }
+ ScriptStats scriptStats = frequently() ? new ScriptStats(randomPositiveLong(), randomPositiveLong()) : null;
+ DiscoveryStats discoveryStats = frequently() ? new DiscoveryStats(randomBoolean() ? new PendingClusterStateStats(randomInt(),
+ randomInt(), randomInt()) : null) : null;
+ IngestStats ingestStats = null;
+ if (frequently()) {
+ IngestStats.Stats totalStats = new IngestStats.Stats(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong());
+
+ int numStatsPerPipeline = randomIntBetween(0, 10);
+ Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>();
+ for (int i = 0; i < numStatsPerPipeline; i++) {
+ statsPerPipeline.put(randomAsciiOfLengthBetween(3, 10), new IngestStats.Stats(randomPositiveLong(),
+ randomPositiveLong(), randomPositiveLong(), randomPositiveLong()));
+ }
+ ingestStats = new IngestStats(totalStats, statsPerPipeline);
+ }
+ // TODO: NodeIndicesStats is not tested here; it is far too complicated to create and still needs to be migrated to Writeable
+ return new NodeStats(node, randomPositiveLong(), null, osStats, processStats, jvmStats, threadPoolStats, fsInfo,
+ transportStats, httpStats, allCircuitBreakerStats, scriptStats, discoveryStats, ingestStats);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
index 8bb32c240f..c457d3a30f 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
@@ -169,7 +169,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {
clusterService = createClusterService(threadPool);
transportService = new TransportService(settings,
new LocalTransport(settings, threadPool, new NamedWriteableRegistry(Collections.emptyList()),
- new NoneCircuitBreakerService()), threadPool) {
+ new NoneCircuitBreakerService()), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR) {
@Override
protected TaskManager createTaskManager() {
if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
index a7234b20ab..134477cc20 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
@@ -37,7 +37,10 @@ import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.fieldstats.FieldStatsAction;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
+import org.elasticsearch.action.support.replication.TransportReplicationActionTests;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
@@ -46,10 +49,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.tasks.TaskResult;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.tasks.TaskResult;
import org.elasticsearch.tasks.TaskResultsService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.tasks.MockTaskManager;
@@ -69,12 +72,11 @@ import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
+import static java.util.Collections.emptyList;
import static java.util.Collections.singleton;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
@@ -94,7 +96,7 @@ import static org.hamcrest.Matchers.not;
* <p>
* We need at least 2 nodes so we have a master node and a non-master node
*/
-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2)
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2, transportClientRatio = 0.0)
public class TasksIT extends ESIntegTestCase {
private Map<Tuple<String, String>, RecordingTaskManagerListener> listeners = new HashMap<>();
@@ -326,48 +328,34 @@ public class TasksIT extends ESIntegTestCase {
}
/**
- * Very basic "is it plugged in" style test that indexes a document and
- * makes sure that you can fetch the status of the process. The goal here is
- * to verify that the large moving parts that make fetching task status work
- * fit together rather than to verify any particular status results from
- * indexing. For that, look at
- * {@link org.elasticsearch.action.support.replication.TransportReplicationActionTests}
- * . We intentionally don't use the task recording mechanism used in other
- * places in this test so we can make sure that the status fetching works
- * properly over the wire.
+ * Very basic "is it plugged in" style test that indexes a document and makes sure that you can fetch the status of the process. The
+ * goal here is to verify that the large moving parts that make fetching task status work fit together rather than to verify any
+ * particular status results from indexing. For that, look at {@link TransportReplicationActionTests}. We intentionally don't use the
+ * task recording mechanism used in other places in this test so we can make sure that the status fetching works properly over the wire.
*/
- public void testCanFetchIndexStatus() throws InterruptedException, ExecutionException, IOException {
- /*
- * We prevent any tasks from unregistering until the test is done so we
- * can fetch them. This will gum up the server if we leave it enabled
- * but we'll be quick so it'll be OK (TM).
- */
- ReentrantLock taskFinishLock = new ReentrantLock();
- taskFinishLock.lock();
- ListenableActionFuture<?> indexFuture = null;
+ public void testCanFetchIndexStatus() throws Exception {
+ // First latch waits for the task to start, the second one blocks it from finishing.
+ CountDownLatch taskRegistered = new CountDownLatch(1);
+ CountDownLatch letTaskFinish = new CountDownLatch(1);
+ Thread index = null;
try {
- CountDownLatch taskRegistered = new CountDownLatch(1);
for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
@Override
public void onTaskRegistered(Task task) {
if (task.getAction().startsWith(IndexAction.NAME)) {
taskRegistered.countDown();
+ logger.debug("Blocking [{}] starting", task);
+ try {
+ assertTrue(letTaskFinish.await(10, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
}
}
@Override
public void onTaskUnregistered(Task task) {
- /*
- * We can't block all tasks here or the task listing task
- * would never return.
- */
- if (false == task.getAction().startsWith(IndexAction.NAME)) {
- return;
- }
- logger.debug("Blocking {} from being unregistered", task);
- taskFinishLock.lock();
- taskFinishLock.unlock();
}
@Override
@@ -375,8 +363,13 @@ public class TasksIT extends ESIntegTestCase {
}
});
}
- indexFuture = client().prepareIndex("test", "test").setSource("test", "test").execute();
- taskRegistered.await(10, TimeUnit.SECONDS); // waiting for at least one task to be registered
+ // Need to run the task in a separate thread because the node client's .execute() is blocked by our task listener
+ index = new Thread(() -> {
+ IndexResponse indexResponse = client().prepareIndex("test", "test").setSource("test", "test").get();
+ assertArrayEquals(ReplicationResponse.EMPTY, indexResponse.getShardInfo().getFailures());
+ });
+ index.start();
+ assertTrue(taskRegistered.await(10, TimeUnit.SECONDS)); // waiting for at least one task to be registered
ListTasksResponse listResponse = client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*")
.setDetailed(true).get();
@@ -390,17 +383,21 @@ public class TasksIT extends ESIntegTestCase {
assertEquals(task.getType(), fetchedWithGet.getType());
assertEquals(task.getAction(), fetchedWithGet.getAction());
assertEquals(task.getDescription(), fetchedWithGet.getDescription());
- // The status won't always be equal - it might change between the list and the get.
+ assertEquals(task.getStatus(), fetchedWithGet.getStatus());
assertEquals(task.getStartTime(), fetchedWithGet.getStartTime());
assertThat(fetchedWithGet.getRunningTimeNanos(), greaterThanOrEqualTo(task.getRunningTimeNanos()));
assertEquals(task.isCancellable(), fetchedWithGet.isCancellable());
assertEquals(task.getParentTaskId(), fetchedWithGet.getParentTaskId());
}
} finally {
- taskFinishLock.unlock();
- if (indexFuture != null) {
- indexFuture.get();
+ letTaskFinish.countDown();
+ if (index != null) {
+ index.join();
}
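+ // The task may linger briefly after the response returns; wait until it is no longer listed.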
+ assertBusy(() -> {
+ assertEquals(emptyList(),
+ client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*").get().getTasks());
+ });
}
}
@@ -449,6 +446,9 @@ public class TasksIT extends ESIntegTestCase {
}, response -> {
assertThat(response.getNodeFailures(), empty());
assertThat(response.getTaskFailures(), empty());
+ assertThat(response.getTasks(), hasSize(1));
+ TaskInfo task = response.getTasks().get(0);
+ assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction());
});
}
@@ -456,10 +456,12 @@ public class TasksIT extends ESIntegTestCase {
waitForCompletionTestCase(false, id -> {
return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute();
}, response -> {
- assertNotNull(response.getTask().getTask());
assertTrue(response.getTask().isCompleted());
// We didn't store the result so it won't come back when we wait
assertNull(response.getTask().getResponse());
+ // But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete.
+ assertNotNull(response.getTask().getTask());
+ assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
});
}
@@ -467,10 +469,12 @@ public class TasksIT extends ESIntegTestCase {
waitForCompletionTestCase(true, id -> {
return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute();
}, response -> {
- assertNotNull(response.getTask().getTask());
assertTrue(response.getTask().isCompleted());
// We stored the task so we should get its results
assertEquals(0, response.getTask().getResponseAsMap().get("failure_count"));
+ // The task's details should also be there
+ assertNotNull(response.getTask().getTask());
+ assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
});
}
@@ -500,6 +504,7 @@ public class TasksIT extends ESIntegTestCase {
((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
@Override
public void waitForTaskCompletion(Task task) {
+ waitForWaitingToStart.countDown();
}
@Override
@@ -508,7 +513,6 @@ public class TasksIT extends ESIntegTestCase {
@Override
public void onTaskUnregistered(Task task) {
- waitForWaitingToStart.countDown();
}
});
}
@@ -516,7 +520,9 @@ public class TasksIT extends ESIntegTestCase {
// Spin up a request to wait for the test task to finish
waitResponseFuture = wait.apply(taskId);
- // Wait for the wait to start
+ /* Wait for the wait to start. This should count down just *before* we wait for completion but after the list/get has gotten a
+ * reference to the running task. Because we unblock immediately after this, the task may no longer be running for us to wait
+ * on, which is fine. */
waitForWaitingToStart.await();
} finally {
// Unblock the request so the wait for completion request can finish
@@ -527,7 +533,8 @@ public class TasksIT extends ESIntegTestCase {
T waitResponse = waitResponseFuture.get();
validator.accept(waitResponse);
- future.get();
+ TestTaskPlugin.NodesResponse response = future.get();
+ assertEquals(emptyList(), response.failures());
}
public void testListTasksWaitForTimeout() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
index ca0242c066..c8133ba7ba 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
@@ -45,7 +45,6 @@ import org.elasticsearch.test.rest.FakeRestRequest;
import java.io.IOException;
import java.util.Arrays;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -74,9 +73,8 @@ public class ClusterRerouteRequestTests extends ESTestCase {
private final AllocationCommandRegistry allocationCommandRegistry;
public ClusterRerouteRequestTests() {
- NetworkModule networkModule = new NetworkModule(null, null, true);
- allocationCommandRegistry = networkModule.getAllocationCommandRegistry();
- namedWriteableRegistry = new NamedWriteableRegistry(networkModule.getNamedWriteables());
+ allocationCommandRegistry = NetworkModule.getAllocationCommandRegistry();
+ namedWriteableRegistry = new NamedWriteableRegistry(NetworkModule.getNamedWriteables());
}
private ClusterRerouteRequest randomRequest() {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java
index b3b91e6bfd..a905487994 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java
@@ -21,14 +21,14 @@ package org.elasticsearch.action.admin.cluster.reroute;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -41,8 +41,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.io.IOException;
import java.util.Collections;
@@ -51,6 +50,8 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
public class ClusterRerouteTests extends ESAllocationTestCase {
@@ -64,8 +65,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
BytesStreamOutput out = new BytesStreamOutput();
req.writeTo(out);
BytesReference bytes = out.bytes();
- NetworkModule networkModule = new NetworkModule(null, Settings.EMPTY, true);
- NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(networkModule.getNamedWriteables());
+ NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(NetworkModule.getNamedWriteables());
StreamInput wrap = new NamedWriteableAwareStreamInput(bytes.streamInput(),
namedWriteableRegistry);
ClusterRerouteRequest deserializedReq = new ClusterRerouteRequest();
@@ -82,7 +82,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
public void testClusterStateUpdateTask() {
AllocationService allocationService = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
ClusterState clusterState = createInitialClusterState(allocationService);
ClusterRerouteRequest req = new ClusterRerouteRequest();
req.dryRun(true);
@@ -118,12 +118,12 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
assertEquals(routingTable.index("idx").shards().size(), 1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i);
- List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(
- new FailedRerouteAllocation.FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i,
+ List<FailedShard> failedShards = Collections.singletonList(
+ new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i,
new UnsupportedOperationException()));
- RoutingAllocation.Result result = allocationService.applyFailedShards(clusterState, failedShards);
- assertTrue(result.changed());
- clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build();
+ newState = allocationService.applyFailedShards(clusterState, failedShards);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingTable = clusterState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
if (i == retries-1) {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java
index f2ed690bb9..aa73eafb49 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java
@@ -21,12 +21,15 @@ package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.monitor.os.OsStats;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -167,7 +170,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000L)); // 1 Jan 2000
assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0L));
- assertThat(msg, response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0L));
+ assertThat(msg, response.nodesStats.getFs().getTotal().getBytes(), Matchers.greaterThan(0L));
assertThat(msg, response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
assertThat(msg, response.nodesStats.getVersions().size(), Matchers.greaterThan(0));
@@ -181,6 +184,20 @@ public class ClusterStatsIT extends ESIntegTestCase {
assertThat(msg, response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
assertThat(msg, response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
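+ // Sum the per-node OS memory stats and verify they match the cluster-level rollup, including the derived percentages.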
+ NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats().setOs(true).get();
+ long total = 0;
+ long free = 0;
+ long used = 0;
+ for (NodeStats nodeStats : nodesStatsResponse.getNodes()) {
+ total += nodeStats.getOs().getMem().getTotal().getBytes();
+ free += nodeStats.getOs().getMem().getFree().getBytes();
+ used += nodeStats.getOs().getMem().getUsed().getBytes();
+ }
+ assertEquals(msg, free, response.nodesStats.getOs().getMem().getFree().getBytes());
+ assertEquals(msg, total, response.nodesStats.getOs().getMem().getTotal().getBytes());
+ assertEquals(msg, used, response.nodesStats.getOs().getMem().getUsed().getBytes());
+ assertEquals(msg, OsStats.calculatePercentage(used, total), response.nodesStats.getOs().getMem().getUsedPercent());
+ assertEquals(msg, OsStats.calculatePercentage(free, total), response.nodesStats.getOs().getMem().getFreePercent());
}
public void testAllocatedProcessors() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
index 6919db1b73..bcd7bba8d3 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
@@ -28,7 +28,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.AllFieldMapper;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.test.ESTestCase;
@@ -41,7 +41,7 @@ import static java.util.Collections.emptyList;
public class TransportAnalyzeActionTests extends ESTestCase {
- private AnalysisService analysisService;
+ private IndexAnalyzers indexAnalyzers;
private AnalysisRegistry registry;
private Environment environment;
@@ -70,10 +70,10 @@ public class TransportAnalyzeActionTests extends ESTestCase {
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
environment = new Environment(settings);
registry = new AnalysisModule(environment, emptyList()).getAnalysisRegistry();
- analysisService = registry.build(idxSettings);
+ indexAnalyzers = registry.build(idxSettings);
}
- public void testNoAnalysisService() throws IOException {
+ public void testNoIndexAnalyzers() throws IOException {
AnalyzeRequest request = new AnalyzeRequest();
request.analyzer("standard");
request.text("the quick brown fox");
@@ -86,7 +86,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.addTokenFilter("lowercase");
request.addTokenFilter("word_delimiter");
request.text("the qu1ck brown fox");
- analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? analysisService : null, registry, environment);
+ analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? indexAnalyzers : null, registry, environment);
tokens = analyze.getTokens();
assertEquals(6, tokens.size());
assertEquals("qu", tokens.get(1).getTerm());
@@ -99,7 +99,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.addTokenFilter("lowercase");
request.addTokenFilter("word_delimiter");
request.text("<p>the qu1ck brown fox</p>");
- analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? analysisService : null, registry, environment);
+ analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, randomBoolean() ? indexAnalyzers : null, registry, environment);
tokens = analyze.getTokens();
assertEquals(6, tokens.size());
assertEquals("the", tokens.get(0).getTerm());
@@ -142,26 +142,26 @@ public class TransportAnalyzeActionTests extends ESTestCase {
assertEquals("<ALPHANUM>", tokens.get(3).getType());
}
- public void testWithAnalysisService() throws IOException {
+ public void testWithIndexAnalyzers() throws IOException {
AnalyzeRequest request = new AnalyzeRequest();
request.analyzer("standard");
request.text("the quick brown fox");
request.analyzer("custom_analyzer");
request.text("the qu1ck brown fox");
- AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
+ AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
assertEquals(4, tokens.size());
request.analyzer("whitespace");
request.text("the qu1ck brown fox-dog");
- analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
+ analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(4, tokens.size());
request.analyzer("custom_analyzer");
request.text("the qu1ck brown fox-dog");
- analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
+ analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(5, tokens.size());
@@ -170,7 +170,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.addTokenFilter("lowercase");
request.addTokenFilter("wordDelimiter");
request.text("the qu1ck brown fox-dog");
- analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
+ analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(5, tokens.size());
assertEquals("the", tokens.get(0).getTerm());
@@ -183,83 +183,88 @@ public class TransportAnalyzeActionTests extends ESTestCase {
request.tokenizer("trigram");
request.addTokenFilter("synonym");
request.text("kimchy");
- analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, analysisService, registry, environment);
+ analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
tokens = analyze.getTokens();
assertEquals(2, tokens.size());
assertEquals("sha", tokens.get(0).getTerm());
assertEquals("hay", tokens.get(1).getTerm());
}
- public void testGetIndexAnalyserWithoutAnalysisService() throws IOException {
- AnalyzeRequest request = new AnalyzeRequest();
- request.analyzer("custom_analyzer");
- request.text("the qu1ck brown fox-dog");
- try {
- TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, null, registry, environment);
- fail("no analysis service provided");
- } catch (IllegalArgumentException e) {
- assertEquals(e.getMessage(), "failed to find global analyzer [custom_analyzer]");
- }
+ public void testGetIndexAnalyserWithoutIndexAnalyzers() throws IOException {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> TransportAnalyzeAction.analyze(
+ new AnalyzeRequest()
+ .analyzer("custom_analyzer")
+ .text("the qu1ck brown fox-dog"),
+ AllFieldMapper.NAME, null, null, registry, environment));
+ assertEquals(e.getMessage(), "failed to find global analyzer [custom_analyzer]");
}
public void testUnknown() throws IOException {
boolean notGlobal = randomBoolean();
- try {
- AnalyzeRequest request = new AnalyzeRequest();
- request.analyzer("foobar");
- request.text("the qu1ck brown fox");
- TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment);
- fail("no such analyzer");
- } catch (IllegalArgumentException e) {
- if (notGlobal) {
- assertEquals(e.getMessage(), "failed to find analyzer [foobar]");
- } else {
- assertEquals(e.getMessage(), "failed to find global analyzer [foobar]");
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> TransportAnalyzeAction.analyze(
+ new AnalyzeRequest()
+ .analyzer("foobar")
+ .text("the qu1ck brown fox"),
+ AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
+ if (notGlobal) {
+ assertEquals(e.getMessage(), "failed to find analyzer [foobar]");
+ } else {
+ assertEquals(e.getMessage(), "failed to find global analyzer [foobar]");
}
- try {
- AnalyzeRequest request = new AnalyzeRequest();
- request.tokenizer("foobar");
- request.text("the qu1ck brown fox");
- TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment);
- fail("no such analyzer");
- } catch (IllegalArgumentException e) {
- if (notGlobal) {
- assertEquals(e.getMessage(), "failed to find tokenizer under [foobar]");
- } else {
- assertEquals(e.getMessage(), "failed to find global tokenizer under [foobar]");
- }
+
+ e = expectThrows(IllegalArgumentException.class,
+ () -> TransportAnalyzeAction.analyze(
+ new AnalyzeRequest()
+ .tokenizer("foobar")
+ .text("the qu1ck brown fox"),
+ AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
+ if (notGlobal) {
+ assertEquals(e.getMessage(), "failed to find tokenizer under [foobar]");
+ } else {
+ assertEquals(e.getMessage(), "failed to find global tokenizer under [foobar]");
}
- try {
- AnalyzeRequest request = new AnalyzeRequest();
- request.tokenizer("whitespace");
- request.addTokenFilter("foobar");
- request.text("the qu1ck brown fox");
- TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment);
- fail("no such analyzer");
- } catch (IllegalArgumentException e) {
- if (notGlobal) {
- assertEquals(e.getMessage(), "failed to find token filter under [foobar]");
- } else {
- assertEquals(e.getMessage(), "failed to find global token filter under [foobar]");
- }
+ e = expectThrows(IllegalArgumentException.class,
+ () -> TransportAnalyzeAction.analyze(
+ new AnalyzeRequest()
+ .tokenizer("whitespace")
+ .addTokenFilter("foobar")
+ .text("the qu1ck brown fox"),
+ AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
+ if (notGlobal) {
+ assertEquals(e.getMessage(), "failed to find token filter under [foobar]");
+ } else {
+ assertEquals(e.getMessage(), "failed to find global token filter under [foobar]");
}
- try {
- AnalyzeRequest request = new AnalyzeRequest();
- request.tokenizer("whitespace");
- request.addTokenFilter("lowercase");
- request.addCharFilter("foobar");
- request.text("the qu1ck brown fox");
- TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, notGlobal ? analysisService : null, registry, environment);
- fail("no such analyzer");
- } catch (IllegalArgumentException e) {
- if (notGlobal) {
- assertEquals(e.getMessage(), "failed to find char filter under [foobar]");
- } else {
- assertEquals(e.getMessage(), "failed to find global char filter under [foobar]");
- }
+ e = expectThrows(IllegalArgumentException.class,
+ () -> TransportAnalyzeAction.analyze(
+ new AnalyzeRequest()
+ .tokenizer("whitespace")
+ .addTokenFilter("lowercase")
+ .addCharFilter("foobar")
+ .text("the qu1ck brown fox"),
+ AllFieldMapper.NAME, null, notGlobal ? indexAnalyzers : null, registry, environment));
+ if (notGlobal) {
+ assertEquals(e.getMessage(), "failed to find char filter under [foobar]");
+ } else {
+ assertEquals(e.getMessage(), "failed to find global char filter under [foobar]");
}
}
+
+ public void testNonPreBuildTokenFilter() throws IOException {
+ AnalyzeRequest request = new AnalyzeRequest();
+ request.tokenizer("whitespace");
+ request.addTokenFilter("min_hash");
+ request.text("the quick brown fox");
+ AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
+ List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
+ int defaultHashCount = 1;
+ int defaultBucketSize = 512;
+ int defaultHashSetSize = 1;
+ assertEquals(defaultHashCount * defaultBucketSize * defaultHashSetSize, tokens.size());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java
new file mode 100644
index 0000000000..0038a6c840
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java
@@ -0,0 +1,350 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias;
+
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.hamcrest.Matchers.arrayContaining;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class AliasActionsTests extends ESTestCase {
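+ // Round-trip and parsing tests for the individual alias actions of an indices-aliases request.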
+ public void testValidate() {
+ AliasActions.Type type = randomFrom(AliasActions.Type.values());
+ if (type == AliasActions.Type.REMOVE_INDEX) {
+ Exception e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).validate());
+ assertEquals("One of [index] or [indices] is required", e.getMessage());
+ } else {
+ Exception e = expectThrows(IllegalArgumentException.class,
+ () -> new AliasActions(type).alias(randomAsciiOfLength(5)).validate());
+ assertEquals("One of [index] or [indices] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).index(randomAsciiOfLength(5)).validate());
+ assertEquals("One of [alias] or [aliases] is required", e.getMessage());
+ }
+ }
+
+ public void testEmptyIndex() {
+ Exception e = expectThrows(IllegalArgumentException.class,
+ () -> new AliasActions(randomFrom(AliasActions.Type.values())).index(null));
+ assertEquals("[index] can't be empty string", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class,
+ () -> new AliasActions(randomFrom(AliasActions.Type.values())).index(""));
+ assertEquals("[index] can't be empty string", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class,
+ () -> new AliasActions(randomFrom(AliasActions.Type.values())).indices((String[]) null));
+ assertEquals("[indices] can't be empty", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class,
+ () -> new AliasActions(randomFrom(AliasActions.Type.values())).indices(new String[0]));
+ assertEquals("[indices] can't be empty", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class,
+ () -> new AliasActions(randomFrom(AliasActions.Type.values())).indices("test", null));
+ assertEquals("[indices] can't contain empty string", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class,
+ () -> new AliasActions(randomFrom(AliasActions.Type.values())).indices("test", ""));
+ assertEquals("[indices] can't contain empty string", e.getMessage());
+ }
+
+ public void testEmptyAlias() {
+ AliasActions.Type type = randomValueOtherThan(AliasActions.Type.REMOVE_INDEX, () -> randomFrom(AliasActions.Type.values()));
+ Exception e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).alias(null));
+ assertEquals("[alias] can't be empty string", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).alias(""));
+ assertEquals("[alias] can't be empty string", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).aliases((String[]) null));
+ assertEquals("[aliases] can't be empty", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).aliases(new String[0]));
+ assertEquals("[aliases] can't be empty", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).aliases("test", null));
+ assertEquals("[aliases] can't contain empty string", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(type).aliases("test", ""));
+ assertEquals("[aliases] can't contain empty string", e.getMessage());
+ }
+
+ public void testBadOptionsInNonIndex() {
+ AliasActions action = randomBoolean() ? AliasActions.remove() : AliasActions.removeIndex();
+ Exception e = expectThrows(IllegalArgumentException.class, () -> action.routing("test"));
+ assertEquals("[routing] is unsupported for [" + action.actionType() + "]", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> action.searchRouting("test"));
+ assertEquals("[search_routing] is unsupported for [" + action.actionType() + "]", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> action.indexRouting("test"));
+ assertEquals("[index_routing] is unsupported for [" + action.actionType() + "]", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> action.filter("test"));
+ assertEquals("[filter] is unsupported for [" + action.actionType() + "]", e.getMessage());
+ }
+
+ public void testParseAdd() throws IOException {
+ String[] indices = generateRandomStringArray(10, 5, false, false);
+ String[] aliases = generateRandomStringArray(10, 5, false, false);
+ Map<String, Object> filter = randomBoolean() ? randomMap(5) : null;
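+ // Routing values may be shared, distinct, or absent, so the routing/search_routing/index_routing combinations are all parsed.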
+ Object searchRouting = randomBoolean() ? randomRouting() : null;
+ Object indexRouting = randomBoolean() ? (randomBoolean() ? searchRouting : randomRouting()) : null;
+ XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
+ b.startObject(); {
+ b.startObject("add"); {
+ if (indices.length > 1 || randomBoolean()) {
+ b.array("indices", indices);
+ } else {
+ b.field("index", indices[0]);
+ }
+ if (aliases.length > 1 || randomBoolean()) {
+ b.array("aliases", aliases);
+ } else {
+ b.field("alias", aliases[0]);
+ }
+ if (filter != null) {
+ b.field("filter", filter);
+ }
+ if (searchRouting != null) {
+ if (searchRouting.equals(indexRouting)) {
+ b.field("routing", searchRouting);
+ } else {
+ b.field("search_routing", searchRouting);
+ }
+ }
+ if (indexRouting != null && false == indexRouting.equals(searchRouting)) {
+ b.field("index_routing", indexRouting);
+ }
+ }
+ b.endObject();
+ }
+ b.endObject();
+ b = shuffleXContent(b, "filter");
+ try (XContentParser parser = XContentHelper.createParser(b.bytes())) {
+ AliasActions action = AliasActions.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
+ assertEquals(AliasActions.Type.ADD, action.actionType());
+ assertThat(action.indices(), equalTo(indices));
+ assertThat(action.aliases(), equalTo(aliases));
+ if (filter == null || filter.isEmpty()) {
+ assertNull(action.filter());
+ } else {
+ assertEquals(XContentFactory.contentBuilder(XContentType.JSON).map(filter).string(), action.filter());
+ }
+ assertEquals(Objects.toString(searchRouting, null), action.searchRouting());
+ assertEquals(Objects.toString(indexRouting, null), action.indexRouting());
+ }
+ }
+
+ public void testParseAddDefaultRouting() throws IOException {
+ String index = randomAsciiOfLength(5);
+ String alias = randomAsciiOfLength(5);
+ Object searchRouting = randomRouting();
+ Object indexRouting = randomRouting();
+ XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
+ b.startObject(); {
+ b.startObject("add"); {
+ b.field("index", index);
+ b.field("alias", alias);
+ if (randomBoolean()) {
+ b.field("routing", searchRouting);
+ b.field("index_routing", indexRouting);
+ } else {
+ b.field("search_routing", searchRouting);
+ b.field("routing", indexRouting);
+ }
+ }
+ b.endObject();
+ }
+ b.endObject();
+ b = shuffleXContent(b);
+ try (XContentParser parser = XContentHelper.createParser(b.bytes())) {
+ AliasActions action = AliasActions.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
+ assertEquals(AliasActions.Type.ADD, action.actionType());
+ assertThat(action.indices(), arrayContaining(index));
+ assertThat(action.aliases(), arrayContaining(alias));
+ assertEquals(searchRouting.toString(), action.searchRouting());
+ assertEquals(indexRouting.toString(), action.indexRouting());
+ }
+ }
+
+ public void testParseRemove() throws IOException {
+ String[] indices = generateRandomStringArray(10, 5, false, false);
+ String[] aliases = generateRandomStringArray(10, 5, false, false);
+ XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
+ b.startObject(); {
+ b.startObject("remove"); {
+ if (indices.length > 1 || randomBoolean()) {
+ b.array("indices", indices);
+ } else {
+ b.field("index", indices[0]);
+ }
+ if (aliases.length > 1 || randomBoolean()) {
+ b.array("aliases", aliases);
+ } else {
+ b.field("alias", aliases[0]);
+ }
+ }
+ b.endObject();
+ }
+ b.endObject();
+ b = shuffleXContent(b);
+ try (XContentParser parser = XContentHelper.createParser(b.bytes())) {
+ AliasActions action = AliasActions.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
+ assertEquals(AliasActions.Type.REMOVE, action.actionType());
+ assertThat(action.indices(), equalTo(indices));
+ assertThat(action.aliases(), equalTo(aliases));
+ }
+ }
+
+ public void testParseRemoveIndex() throws IOException {
+ String[] indices = randomBoolean() ? new String[] {randomAsciiOfLength(5)} : generateRandomStringArray(10, 5, false, false);
+ XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
+ b.startObject(); {
+ b.startObject("remove_index"); {
+ if (indices.length > 1 || randomBoolean()) {
+ b.array("indices", indices);
+ } else {
+ b.field("index", indices[0]);
+ }
+ }
+ b.endObject();
+ }
+ b.endObject();
+ b = shuffleXContent(b);
+ try (XContentParser parser = XContentHelper.createParser(b.bytes())) {
+ AliasActions action = AliasActions.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
+ assertEquals(AliasActions.Type.REMOVE_INDEX, action.actionType());
+ assertArrayEquals(indices, action.indices());
+ assertThat(action.aliases(), arrayWithSize(0));
+ }
+ }
+
+ public void testParseIndexAndIndicesThrowsError() throws IOException {
+ XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
+ b.startObject(); {
+ b.startObject(randomFrom("add", "remove")); {
+ b.field("index", randomAsciiOfLength(5));
+ b.array("indices", generateRandomStringArray(10, 5, false, false));
+ b.field("alias", randomAsciiOfLength(5));
+ }
+ b.endObject();
+ }
+ b.endObject();
+ try (XContentParser parser = XContentHelper.createParser(b.bytes())) {
+ Exception e = expectThrows(ParsingException.class, () -> AliasActions.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT));
+ assertThat(e.getCause().getCause(), instanceOf(IllegalArgumentException.class));
+ assertEquals("Only one of [index] and [indices] is supported", e.getCause().getCause().getMessage());
+ }
+ }
+
+ public void testParseAliasAndAliasesThrowsError() throws IOException {
+ XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
+ b.startObject(); {
+ b.startObject(randomFrom("add", "remove")); {
+ b.field("index", randomAsciiOfLength(5));
+ b.field("alias", randomAsciiOfLength(5));
+ b.array("aliases", generateRandomStringArray(10, 5, false, false));
+ }
+ b.endObject();
+ }
+ b.endObject();
+ try (XContentParser parser = XContentHelper.createParser(b.bytes())) {
+ Exception e = expectThrows(ParsingException.class, () -> AliasActions.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT));
+ assertThat(e.getCause().getCause(), instanceOf(IllegalArgumentException.class));
+ assertEquals("Only one of [alias] and [aliases] is supported", e.getCause().getCause().getMessage());
+ }
+ }
+
+ public void testRoundTrip() throws IOException {
+ AliasActions action = new AliasActions(randomFrom(AliasActions.Type.values()));
+ if (randomBoolean()) {
+ action.index(randomAsciiOfLength(5));
+ } else {
+ action.indices(generateRandomStringArray(5, 5, false, false));
+ }
+ if (action.actionType() != AliasActions.Type.REMOVE_INDEX) {
+ if (randomBoolean()) {
+ action.alias(randomAsciiOfLength(5));
+ } else {
+ action.aliases(generateRandomStringArray(5, 5, false, false));
+ }
+ }
+ if (action.actionType() == AliasActions.Type.ADD) {
+ if (randomBoolean()) {
+ action.filter(randomAsciiOfLength(10));
+ }
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ action.routing(randomAsciiOfLength(5));
+ } else {
+ action.searchRouting(randomAsciiOfLength(5));
+ action.indexRouting(randomAsciiOfLength(5));
+ }
+ }
+ }
+ try (BytesStreamOutput out = new BytesStreamOutput()) {
+ action.writeTo(out);
+ try (StreamInput in = out.bytes().streamInput()) {
+ AliasActions read = new AliasActions(in);
+ assertEquals(action, read);
+ }
+ }
+ }
+
+ private Map<String, Object> randomMap(int maxDepth) {
+ int members = between(0, 5);
+ Map<String, Object> result = new HashMap<>(members);
+ for (int i = 0; i < members; i++) {
+ Object value;
+ switch (between(0, 3)) {
+ case 0:
+ if (maxDepth > 0) {
+ value = randomMap(maxDepth - 1);
+ } else {
+ value = randomAsciiOfLength(5);
+ }
+ break;
+ case 1:
+ value = randomAsciiOfLength(5);
+ break;
+ case 2:
+ value = randomBoolean();
+ break;
+ case 3:
+ value = randomLong();
+ break;
+ default:
+ throw new UnsupportedOperationException();
+ }
+ result.put(randomAsciiOfLength(5), value);
+ }
+ return result;
+ }
+
+ private Object randomRouting() {
+ return randomBoolean() ? randomAsciiOfLength(5) : randomInt();
+ }
+}
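The testRoundTrip case above uses the standard Writeable round-trip idiom: serialize to an in-memory buffer, read the bytes back, and assert equality. A minimal generic sketch of that idiom follows; the helper name is hypothetical and not part of this patch, and it assumes the type under test implements equals()/hashCode():

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.Writeable;
    import static org.junit.Assert.assertEquals;

    // Serialize to an in-memory buffer, deserialize from the same bytes, compare.
    public static <T extends Writeable> void assertRoundTrips(T original, Writeable.Reader<T> reader) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                assertEquals(original, reader.read(in));
            }
        }
    }

Since AliasActions gains a StreamInput constructor in this change, a call site would look like assertRoundTrips(action, AliasActions::new).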
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
index ba497ffca3..9d2e56f25b 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
@@ -388,17 +388,22 @@ public class CreateIndexIT extends ESIntegTestCase {
.put("index.blocks.write", true)).get();
ensureGreen();
// now merge source into a single shard index
+
+ final boolean createWithReplicas = randomBoolean();
assertAcked(client().admin().indices().prepareShrinkIndex("source", "target")
- .setSettings(Settings.builder().put("index.number_of_replicas", 0).build()).get());
- ensureGreen();
- assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
- // bump replicas
- client().admin().indices().prepareUpdateSettings("target")
- .setSettings(Settings.builder()
- .put("index.number_of_replicas", 1)).get();
+ .setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+ if (createWithReplicas == false) {
+ // bump replicas
+ client().admin().indices().prepareUpdateSettings("target")
+ .setSettings(Settings.builder()
+ .put("index.number_of_replicas", 1)).get();
+ ensureGreen();
+ assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
+ }
+
for (int i = 20; i < 40; i++) {
client().prepareIndex("target", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
index 7a55b22b60..0ca2bd2338 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
@@ -49,7 +49,7 @@ public class FlushBlocksIT extends ESIntegTestCase {
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
try {
enableIndexBlock("test", blockSetting);
- FlushResponse response = client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute().actionGet();
+ FlushResponse response = client().admin().indices().prepareFlush("test").execute().actionGet();
assertNoFailures(response);
assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
} finally {
@@ -80,4 +80,4 @@ public class FlushBlocksIT extends ESIntegTestCase {
setClusterReadOnly(false);
}
}
-} \ No newline at end of file
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
index 6566eb96db..d5bc16207f 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
@@ -20,13 +20,21 @@
package org.elasticsearch.action.admin.indices.rollover;
import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
+import java.util.Collection;
+import java.util.Collections;
import java.util.Map;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -35,6 +43,12 @@ import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class RolloverIT extends ESIntegTestCase {
+ @Override
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return Collections.singleton(InternalSettingsPlugin.class);
+ }
+
public void testRolloverOnEmptyIndex() throws Exception {
assertAcked(prepareCreate("test_index-1").addAlias(new Alias("test_alias")).get());
final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get();
@@ -161,4 +175,47 @@ public class RolloverIT extends ESIntegTestCase {
assertThat(e.getIndex().getName(), equalTo("test_index-000001"));
}
}
+
+ public void testRolloverWithDateMath() {
+ DateTime now = new DateTime(DateTimeZone.UTC);
+ String index = "test-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now) + "-1";
+ String dateMathExp = "<test-{now/d}-1>";
+ assertAcked(prepareCreate(dateMathExp).addAlias(new Alias("test_alias")).get());
+ ensureGreen(index);
+ // now we modify the provided name such that we can test that the pattern is carried on
+ client().admin().indices().prepareClose(index).get();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME,
+ "<test-{now/M{YYYY.MM}}-1>")).get();
+
+ client().admin().indices().prepareOpen(index).get();
+ ensureGreen(index);
+ RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get();
+ assertThat(response.getOldIndex(), equalTo(index));
+ assertThat(response.getNewIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000002"));
+ assertThat(response.isDryRun(), equalTo(false));
+ assertThat(response.isRolledOver(), equalTo(true));
+ assertThat(response.getConditionStatus().size(), equalTo(0));
+
+ response = client().admin().indices().prepareRolloverIndex("test_alias").get();
+ assertThat(response.getOldIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000002"));
+ assertThat(response.getNewIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000003"));
+ assertThat(response.isDryRun(), equalTo(false));
+ assertThat(response.isRolledOver(), equalTo(true));
+ assertThat(response.getConditionStatus().size(), equalTo(0));
+
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(response.getOldIndex(),
+ response.getNewIndex()).get();
+ assertEquals("<test-{now/M{YYYY.MM}}-000002>", getSettingsResponse.getSetting(response.getOldIndex(),
+ IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+ assertEquals("<test-{now/M{YYYY.MM}}-000003>", getSettingsResponse.getSetting(response.getNewIndex(),
+ IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+
+ response = client().admin().indices().prepareRolloverIndex("test_alias").setNewIndexName("<test-{now/d}-000004>").get();
+ assertThat(response.getOldIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000003"));
+ assertThat(response.getNewIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now) + "-000004"));
+ assertThat(response.isDryRun(), equalTo(false));
+ assertThat(response.isRolledOver(), equalTo(true));
+ assertThat(response.getConditionStatus().size(), equalTo(0));
+ }
}
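The new testRolloverWithDateMath exercises two behaviors at once: a date-math name such as <test-{now/d}-1> is resolved to a concrete index at creation time, while the original expression is preserved in the index.provided_name setting so subsequent rollovers keep applying the pattern. A sketch of how the test derives its expected concrete names (Joda-Time, exactly as the assertions above do):

    DateTime now = new DateTime(DateTimeZone.UTC);
    // <test-{now/d}-1>               resolves to e.g. test-2016.10.03-1
    String daily = "test-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now) + "-1";
    // <test-{now/M{YYYY.MM}}-000002> resolves to e.g. test-2016.10-000002
    String monthly = "test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000002";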
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
index 0feedd1a5d..9e80e92a28 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
@@ -33,14 +34,18 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.test.ESTestCase;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
import java.util.HashSet;
+import java.util.List;
import java.util.Locale;
import java.util.Set;
import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.hasSize;
public class TransportRolloverActionTests extends ESTestCase {
@@ -97,19 +102,19 @@ public class TransportRolloverActionTests extends ESTestCase {
final IndicesAliasesClusterStateUpdateRequest updateRequest =
TransportRolloverAction.prepareRolloverAliasesUpdateRequest(sourceIndex, targetIndex, rolloverRequest);
- final AliasAction[] actions = updateRequest.actions();
- assertThat(actions.length, equalTo(2));
+ List<AliasAction> actions = updateRequest.actions();
+ assertThat(actions, hasSize(2));
boolean foundAdd = false;
boolean foundRemove = false;
for (AliasAction action : actions) {
- if (action.actionType() == AliasAction.Type.ADD) {
+ if (action.getIndex().equals(targetIndex)) {
+ assertEquals(sourceAlias, ((AliasAction.Add) action).getAlias());
foundAdd = true;
- assertThat(action.index(), equalTo(targetIndex));
- assertThat(action.alias(), equalTo(sourceAlias));
- } else if (action.actionType() == AliasAction.Type.REMOVE) {
+ } else if (action.getIndex().equals(sourceIndex)) {
+ assertEquals(sourceAlias, ((AliasAction.Remove) action).getAlias());
foundRemove = true;
- assertThat(action.index(), equalTo(sourceIndex));
- assertThat(action.alias(), equalTo(sourceAlias));
+ } else {
+                throw new AssertionError("Unknown index [" + action.getIndex() + "]");
}
}
assertTrue(foundAdd);
@@ -153,15 +158,20 @@ public class TransportRolloverActionTests extends ESTestCase {
public void testGenerateRolloverIndexName() throws Exception {
String invalidIndexName = randomAsciiOfLength(10) + "A";
+ IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY);
expectThrows(IllegalArgumentException.class, () ->
- TransportRolloverAction.generateRolloverIndexName(invalidIndexName));
+ TransportRolloverAction.generateRolloverIndexName(invalidIndexName, indexNameExpressionResolver));
int num = randomIntBetween(0, 100);
final String indexPrefix = randomAsciiOfLength(10);
String indexEndingInNumbers = indexPrefix + "-" + num;
- assertThat(TransportRolloverAction.generateRolloverIndexName(indexEndingInNumbers),
+ assertThat(TransportRolloverAction.generateRolloverIndexName(indexEndingInNumbers, indexNameExpressionResolver),
equalTo(indexPrefix + "-" + String.format(Locale.ROOT, "%06d", num + 1)));
- assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-1"), equalTo("index-name-000002"));
- assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-2"), equalTo("index-name-000003"));
+ assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-1", indexNameExpressionResolver),
+ equalTo("index-name-000002"));
+ assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-2", indexNameExpressionResolver),
+ equalTo("index-name-000003"));
+        assertEquals("<index-name-{now/d}-000002>", TransportRolloverAction.generateRolloverIndexName("<index-name-{now/d}-1>",
+ indexNameExpressionResolver));
}
public void testCreateIndexRequest() throws Exception {
@@ -178,7 +188,7 @@ public class TransportRolloverActionTests extends ESTestCase {
.build();
rolloverRequest.getCreateIndexRequest().settings(settings);
final CreateIndexClusterStateUpdateRequest createIndexRequest =
- TransportRolloverAction.prepareCreateIndexRequest(rolloverIndex, rolloverRequest);
+ TransportRolloverAction.prepareCreateIndexRequest(rolloverIndex, rolloverIndex, rolloverRequest);
assertThat(createIndexRequest.settings(), equalTo(settings));
assertThat(createIndexRequest.index(), equalTo(rolloverIndex));
assertThat(createIndexRequest.cause(), equalTo("rollover_index"));
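The testGenerateRolloverIndexName assertions pin down the naming rule: the trailing "-<number>" suffix is incremented and zero-padded to six digits. A simplified sketch of that rule, stated under the caveat that the real TransportRolloverAction also resolves date-math expressions through IndexNameExpressionResolver (which this sketch ignores):

    import java.util.Locale;

    // Increment the trailing counter of a rollover source index name.
    static String nextRolloverName(String sourceIndexName) {
        int dash = sourceIndexName.lastIndexOf('-');
        if (dash < 0) {
            throw new IllegalArgumentException("index name [" + sourceIndexName + "] must end with '-' and a number");
        }
        // NumberFormatException is an IllegalArgumentException, matching the expectThrows above
        int counter = Integer.parseInt(sourceIndexName.substring(dash + 1));
        return sourceIndexName.substring(0, dash) + "-" + String.format(Locale.ROOT, "%06d", counter + 1);
    }
    // nextRolloverName("index-name-1") -> "index-name-000002"
    // nextRolloverName("index-name-2") -> "index-name-000003"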
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
index 36aad4fb36..4a2895ad7e 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
@@ -54,7 +54,7 @@ public class IndicesSegmentsRequestTests extends ESSingleNodeTestCase {
String id = Integer.toString(j);
client().prepareIndex("test", "type1", id).setSource("text", "sometext").get();
}
- client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).get();
+ client().admin().indices().prepareFlush("test").get();
}
public void testBasic() {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
index a887b2f01e..44fb991af9 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
@@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
-@TestLogging("_root:DEBUG,action.admin.indices.shards:TRACE,cluster.service:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.action.admin.indices.shards:TRACE,org.elasticsearch.cluster.service:TRACE")
public class IndicesShardStoreRequestIT extends ESIntegTestCase {
@Override
@@ -213,7 +213,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
builders[i] = client().prepareIndex(index, "type").setSource("field", "value");
}
indexRandom(true, builders);
- client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet();
+ client().admin().indices().prepareFlush().setForce(true).execute().actionGet();
}
private static final class IndexNodePredicate implements Predicate<Settings> {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java
index 49d0ce447b..0c5164aec5 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java
@@ -42,7 +42,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.Arrays;
import java.util.Collections;
@@ -97,7 +97,7 @@ public class TransportShrinkActionTests extends ESTestCase {
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@@ -120,7 +120,7 @@ public class TransportShrinkActionTests extends ESTestCase {
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
index 8493c58729..3c89a6ab74 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService.PutRequest;
import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.NodeServicesProvider;
@@ -54,12 +55,17 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
Map<String, Object> map = new HashMap<>();
map.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "0");
+ map.put("index.shard.check_on_startup", "blargh");
request.settings(Settings.builder().put(map).build());
List<Throwable> throwables = putTemplate(request);
assertEquals(throwables.size(), 1);
assertThat(throwables.get(0), instanceOf(InvalidIndexTemplateException.class));
- assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
+ assertThat(throwables.get(0).getMessage(),
+ containsString("Failed to parse value [0] for setting [index.number_of_shards] must be >= 1"));
+ assertThat(throwables.get(0).getMessage(),
+ containsString("unknown value for [index.shard.check_on_startup] " +
+ "must be one of [true, false, fix, checksum] but was: blargh"));
}
public void testIndexTemplateValidationAccumulatesValidationErrors() {
@@ -75,7 +81,8 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
assertThat(throwables.get(0), instanceOf(InvalidIndexTemplateException.class));
assertThat(throwables.get(0).getMessage(), containsString("name must not contain a space"));
assertThat(throwables.get(0).getMessage(), containsString("template must not start with '_'"));
- assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
+ assertThat(throwables.get(0).getMessage(),
+ containsString("Failed to parse value [0] for setting [index.number_of_shards] must be >= 1"));
}
public void testIndexTemplateWithAliasNameEqualToTemplatePattern() {
@@ -158,10 +165,11 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
null,
null,
null,
- new HashSet<>(),
null,
null, null, null);
- MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, new AliasValidator(Settings.EMPTY), null, null);
+ MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService,
+ new AliasValidator(Settings.EMPTY), null, null,
+ new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS));
final List<Throwable> throwables = new ArrayList<>();
service.putTemplate(request, new MetaDataIndexTemplateService.PutListener() {
@@ -188,13 +196,13 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
indicesService,
null,
null,
- new HashSet<>(),
null,
nodeServicesProvider,
null,
null);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(
- Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService, nodeServicesProvider);
+ Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService, nodeServicesProvider,
+ new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS));
final List<Throwable> throwables = new ArrayList<>();
final CountDownLatch latch = new CountDownLatch(1);
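The reworded assertions reflect that template settings are now validated against the index-scoped settings registry when a template is put, so bogus values fail with specific parse errors. A rough sketch of the idea; the exact validate entry point is an assumption, not taken from this patch:

    IndexScopedSettings indexScopedSettings =
        new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
    Settings templateSettings = Settings.builder().put("index.number_of_shards", "0").build();
    // indexScopedSettings.validate(templateSettings);
    // -> IllegalArgumentException: Failed to parse value [0] for setting
    //    [index.number_of_shards] must be >= 1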
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
index c88055f8dd..230373f741 100644
--- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
@@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.script.Script;
@@ -39,6 +40,7 @@ import java.util.Map;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
@@ -125,49 +127,34 @@ public class BulkRequestTests extends ESTestCase {
public void testSimpleBulk6() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk6.json");
BulkRequest bulkRequest = new BulkRequest();
- try {
- bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
- fail("should have thrown an exception about the wrong format of line 1");
- } catch (IllegalArgumentException e) {
- assertThat("message contains error about the wrong format of line 1: " + e.getMessage(),
- e.getMessage().contains("Malformed action/metadata line [1], expected a simple value for field [_source] but found [START_OBJECT]"), equalTo(true));
- }
+ ParsingException exc = expectThrows(ParsingException.class,
+ () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
+ assertThat(exc.getMessage(), containsString("Unknown key for a VALUE_STRING in [hello]"));
}
public void testSimpleBulk7() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk7.json");
BulkRequest bulkRequest = new BulkRequest();
- try {
- bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
- fail("should have thrown an exception about the wrong format of line 5");
- } catch (IllegalArgumentException e) {
- assertThat("message contains error about the wrong format of line 5: " + e.getMessage(),
- e.getMessage().contains("Malformed action/metadata line [5], expected a simple value for field [_unkown] but found [START_ARRAY]"), equalTo(true));
- }
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
+ () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
+ assertThat(exc.getMessage(),
+ containsString("Malformed action/metadata line [5], expected a simple value for field [_unkown] but found [START_ARRAY]"));
}
public void testSimpleBulk8() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk8.json");
BulkRequest bulkRequest = new BulkRequest();
- try {
- bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
- fail("should have thrown an exception about the unknown parameter _foo");
- } catch (IllegalArgumentException e) {
- assertThat("message contains error about the unknown parameter _foo: " + e.getMessage(),
- e.getMessage().contains("Action/metadata line [3] contains an unknown parameter [_foo]"), equalTo(true));
- }
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
+ () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
+ assertThat(exc.getMessage(), containsString("Action/metadata line [3] contains an unknown parameter [_foo]"));
}
public void testSimpleBulk9() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk9.json");
BulkRequest bulkRequest = new BulkRequest();
- try {
- bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
- fail("should have thrown an exception about the wrong format of line 3");
- } catch (IllegalArgumentException e) {
- assertThat("message contains error about the wrong format of line 3: " + e.getMessage(),
- e.getMessage().contains("Malformed action/metadata line [3], expected START_OBJECT or END_OBJECT but found [START_ARRAY]"), equalTo(true));
- }
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
+ () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
+ assertThat(exc.getMessage(), containsString("Malformed action/metadata line [3], expected START_OBJECT or END_OBJECT but found [START_ARRAY]"));
}
public void testSimpleBulk10() throws Exception {
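The hunks above replace the try/fail/catch pattern with the expectThrows helper from ESTestCase, which returns the caught exception for further assertions. A condensed before/after, where bytes stands in for bulkAction.getBytes(StandardCharsets.UTF_8):

    // Old style:
    //     try {
    //         bulkRequest.add(bytes, 0, bytes.length, null, null);
    //         fail("should have thrown an exception");
    //     } catch (IllegalArgumentException e) {
    //         assertThat(e.getMessage(), containsString("Malformed action/metadata line"));
    //     }
    // New style, same assertion in three lines:
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
        () -> bulkRequest.add(bytes, 0, bytes.length, null, null));
    assertThat(e.getMessage(), containsString("Malformed action/metadata line"));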
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java
index 4c24e76c13..590a503a65 100644
--- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java
@@ -62,14 +62,6 @@ import static org.hamcrest.Matchers.nullValue;
public class BulkWithUpdatesIT extends ESIntegTestCase {
@Override
- protected Settings nodeSettings(int nodeOrdinal) {
- return Settings.builder()
- .put(super.nodeSettings(nodeOrdinal))
- .put("script.default_lang", CustomScriptPlugin.NAME)
- .build();
- }
-
- @Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singleton(CustomScriptPlugin.class);
}
@@ -150,21 +142,21 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(bulkResponse.getItems()[2].getResponse().getId(), equalTo("3"));
assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(2L));
- GetResponse getResponse = client().prepareGet().setIndex("test").setType("type1").setId("1").setFields("field").execute()
+ GetResponse getResponse = client().prepareGet().setIndex("test").setType("type1").setId("1").execute()
.actionGet();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getVersion(), equalTo(2L));
- assertThat(((Number) getResponse.getField("field").getValue()).longValue(), equalTo(2L));
+ assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L));
- getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").execute().actionGet();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getVersion(), equalTo(2L));
- assertThat(((Number) getResponse.getField("field").getValue()).longValue(), equalTo(3L));
+ assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(3L));
- getResponse = client().prepareGet().setIndex("test").setType("type1").setId("3").setFields("field1").execute().actionGet();
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("3").execute().actionGet();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getVersion(), equalTo(2L));
- assertThat(getResponse.getField("field1").getValue().toString(), equalTo("test"));
+ assertThat(getResponse.getSource().get("field1").toString(), equalTo("test"));
bulkResponse = client()
.prepareBulk()
@@ -186,18 +178,18 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(bulkResponse.getItems()[2].getResponse().getIndex(), equalTo("test"));
assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(3L));
- getResponse = client().prepareGet().setIndex("test").setType("type1").setId("6").setFields("field").execute().actionGet();
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("6").execute().actionGet();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getVersion(), equalTo(1L));
- assertThat(((Number) getResponse.getField("field").getValue()).longValue(), equalTo(0L));
+ assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(0L));
- getResponse = client().prepareGet().setIndex("test").setType("type1").setId("7").setFields("field").execute().actionGet();
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("7").execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
- getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").execute().actionGet();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getVersion(), equalTo(3L));
- assertThat(((Number) getResponse.getField("field").getValue()).longValue(), equalTo(4L));
+ assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(4L));
}
public void testBulkVersioning() throws Exception {
@@ -244,14 +236,11 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
.add(client().prepareUpdate("test", "type", "e1")
.setDoc("field", "2").setVersion(10)) // INTERNAL
.add(client().prepareUpdate("test", "type", "e1")
- .setDoc("field", "3").setVersion(20).setVersionType(VersionType.FORCE))
- .add(client().prepareUpdate("test", "type", "e1")
- .setDoc("field", "4").setVersion(20).setVersionType(VersionType.INTERNAL))
+ .setDoc("field", "3").setVersion(13).setVersionType(VersionType.INTERNAL))
.get();
assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("version conflict"));
- assertThat(bulkResponse.getItems()[1].getResponse().getVersion(), equalTo(20L));
- assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(21L));
+ assertThat(bulkResponse.getItems()[1].getFailureMessage(), containsString("version conflict"));
}
public void testBulkUpdateMalformedScripts() throws Exception {
@@ -307,7 +296,8 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
for (int i = 0; i < numDocs; i++) {
builder.add(
client().prepareUpdate()
- .setIndex("test").setType("type1").setId(Integer.toString(i)).setFields("counter")
+ .setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setFields("counter")
.setScript(script)
.setUpsert(jsonBuilder().startObject().field("counter", 1).endObject()));
}
@@ -326,11 +316,11 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(1));
for (int j = 0; j < 5; j++) {
- GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute()
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).execute()
.actionGet();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getVersion(), equalTo(1L));
- assertThat(((Number) getResponse.getField("counter").getValue()).longValue(), equalTo(1L));
+ assertThat(((Number) getResponse.getSource().get("counter")).longValue(), equalTo(1L));
}
}
@@ -417,8 +407,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(response.getItems()[i].getType(), equalTo("type1"));
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE));
for (int j = 0; j < 5; j++) {
- GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute()
- .actionGet();
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).get();
assertThat(getResponse.isExists(), equalTo(false));
}
}
@@ -558,6 +547,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
" \"script\" : {" +
" \"inline\" : \"ctx._source.field2 = 'value2'\"" +
" }," +
+ " \"lang\" : \"" + CustomScriptPlugin.NAME + "\"," +
" \"upsert\" : {" +
" \"field1\" : \"value1'\"" +
" }" +
@@ -590,7 +580,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(bulkResponse.getItems().length, equalTo(3));
assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
- assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(true));
+ assertThat(bulkResponse.getItems()[2].getFailure().getCause().getCause().getMessage(),
+ equalTo("script_lang not supported [painless]"));
client().admin().indices().prepareRefresh("test").get();
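The changes in this file drop setFields(...) from get requests and read values from the _source map instead, since the update results are verified through the source rather than stored fields. A condensed before/after of the read path:

    GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
    // was: ((Number) getResponse.getField("field").getValue()).longValue()
    long value = ((Number) getResponse.getSource().get("field")).longValue();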
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
index 7c39adc76f..4a2f3da952 100644
--- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
+++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
@@ -87,7 +87,8 @@ public class TransportBulkActionTookTests extends ESTestCase {
private TransportBulkAction createAction(boolean controlled, AtomicLong expected) {
CapturingTransport capturingTransport = new CapturingTransport();
- TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool);
+ TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY);
diff --git a/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java b/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
index dab737cf7f..1d3d2c1b55 100644
--- a/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
@@ -42,8 +42,6 @@ public class MultiGetShardRequestTests extends ESTestCase {
if (randomBoolean()) {
multiGetRequest.refresh(true);
}
- multiGetRequest.ignoreErrorsOnGeneratedFields(randomBoolean());
-
MultiGetShardRequest multiGetShardRequest = new MultiGetShardRequest(multiGetRequest, "index", 0);
int numItems = iterations(10, 30);
for (int i = 0; i < numItems; i++) {
@@ -54,7 +52,7 @@ public class MultiGetShardRequestTests extends ESTestCase {
for (int j = 0; j < fields.length; j++) {
fields[j] = randomAsciiOfLength(randomIntBetween(1, 10));
}
- item.fields(fields);
+ item.storedFields(fields);
}
if (randomBoolean()) {
item.version(randomIntBetween(1, Integer.MAX_VALUE));
@@ -79,7 +77,6 @@ public class MultiGetShardRequestTests extends ESTestCase {
assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference()));
assertThat(multiGetShardRequest2.realtime(), equalTo(multiGetShardRequest.realtime()));
assertThat(multiGetShardRequest2.refresh(), equalTo(multiGetShardRequest.refresh()));
- assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields()));
assertThat(multiGetShardRequest2.items.size(), equalTo(multiGetShardRequest.items.size()));
for (int i = 0; i < multiGetShardRequest2.items.size(); i++) {
MultiGetRequest.Item item = multiGetShardRequest.items.get(i);
@@ -87,7 +84,7 @@ public class MultiGetShardRequestTests extends ESTestCase {
assertThat(item2.index(), equalTo(item.index()));
assertThat(item2.type(), equalTo(item.type()));
assertThat(item2.id(), equalTo(item.id()));
- assertThat(item2.fields(), equalTo(item.fields()));
+ assertThat(item2.storedFields(), equalTo(item.storedFields()));
assertThat(item2.version(), equalTo(item.version()));
assertThat(item2.versionType(), equalTo(item.versionType()));
assertThat(item2.fetchSourceContext(), equalTo(item.fetchSourceContext()));
diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
index e6fcad5443..da25ec4261 100644
--- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
@@ -21,8 +21,11 @@ package org.elasticsearch.action.index;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
import java.util.Arrays;
@@ -148,4 +151,43 @@ public class IndexRequestTests extends ESTestCase {
// test negative shard count value not allowed
expectThrows(IllegalArgumentException.class, () -> request.waitForActiveShards(ActiveShardCount.from(randomIntBetween(-10, -1))));
}
+
+ public void testAutoGenIdTimestampIsSet() {
+ IndexRequest request = new IndexRequest("index", "type");
+ request.process(null, true, "index");
+ assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0);
+ request = new IndexRequest("index", "type", "1");
+ request.process(null, true, "index");
+ assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, request.getAutoGeneratedTimestamp());
+ }
+
+ public void testIndexResponse() {
+ ShardId shardId = new ShardId(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10), randomIntBetween(0, 1000));
+ String type = randomAsciiOfLengthBetween(3, 10);
+ String id = randomAsciiOfLengthBetween(3, 10);
+ long version = randomLong();
+ boolean created = randomBoolean();
+ IndexResponse indexResponse = new IndexResponse(shardId, type, id, version, created);
+ int total = randomIntBetween(1, 10);
+ int successful = randomIntBetween(1, 10);
+ ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(total, successful);
+ indexResponse.setShardInfo(shardInfo);
+ boolean forcedRefresh = false;
+ if (randomBoolean()) {
+ forcedRefresh = randomBoolean();
+ indexResponse.setForcedRefresh(forcedRefresh);
+ }
+ assertEquals(type, indexResponse.getType());
+ assertEquals(id, indexResponse.getId());
+ assertEquals(version, indexResponse.getVersion());
+ assertEquals(shardId, indexResponse.getShardId());
+ assertEquals(created ? RestStatus.CREATED : RestStatus.OK, indexResponse.status());
+ assertEquals(total, indexResponse.getShardInfo().getTotal());
+ assertEquals(successful, indexResponse.getShardInfo().getSuccessful());
+ assertEquals(forcedRefresh, indexResponse.forcedRefresh());
+ assertEquals("IndexResponse[index=" + shardId.getIndexName() + ",type=" + type + ",id="+ id +
+ ",version=" + version + ",result=" + (created ? "created" : "updated") +
+ ",shards={\"_shards\":{\"total\":" + total + ",\"successful\":" + successful + ",\"failed\":0}}]",
+ indexResponse.toString());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java
index 331a956e8a..2b9f9c5532 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java
@@ -162,7 +162,7 @@ public class IngestActionFilterTests extends ESTestCase {
PipelineStore store = mock(PipelineStore.class);
Processor processor = new TestProcessor(ingestDocument -> ingestDocument.setFieldValue("field2", "value2"));
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", new CompoundProcessor(processor)));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", randomInt(), new CompoundProcessor(processor)));
executionService = new PipelineExecutionService(store, threadPool);
IngestService ingestService = mock(IngestService.class);
when(ingestService.getPipelineExecutionService()).thenReturn(executionService);
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java
index a602465197..50bd3771bc 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java
@@ -33,29 +33,30 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import org.hamcrest.CustomTypeSafeMatcher;
-import org.mockito.stubbing.Answer;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.same;
-import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
@@ -67,7 +68,7 @@ public class IngestProxyActionFilterTests extends ESTestCase {
private TransportService transportService;
@SuppressWarnings("unchecked")
- private IngestProxyActionFilter buildFilter(int ingestNodes, int totalNodes) {
+ private IngestProxyActionFilter buildFilter(int ingestNodes, int totalNodes, TransportInterceptor interceptor) {
ClusterState.Builder clusterState = new ClusterState.Builder(new ClusterName("_name"));
DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder();
DiscoveryNode localNode = null;
@@ -88,7 +89,7 @@ public class IngestProxyActionFilterTests extends ESTestCase {
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.localNode()).thenReturn(localNode);
when(clusterService.state()).thenReturn(clusterState.build());
- transportService = mock(TransportService.class);
+ transportService = new TransportService(Settings.EMPTY, null, null, interceptor);
return new IngestProxyActionFilter(clusterService, transportService);
}
@@ -97,7 +98,7 @@ public class IngestProxyActionFilterTests extends ESTestCase {
ActionListener actionListener = mock(ActionListener.class);
ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
int totalNodes = randomIntBetween(1, 5);
- IngestProxyActionFilter filter = buildFilter(0, totalNodes);
+ IngestProxyActionFilter filter = buildFilter(0, totalNodes, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
String action;
ActionRequest request;
@@ -114,7 +115,6 @@ public class IngestProxyActionFilterTests extends ESTestCase {
} catch(IllegalStateException e) {
assertThat(e.getMessage(), equalTo("There are no ingest nodes in this cluster, unable to forward request to an ingest node."));
}
- verifyZeroInteractions(transportService);
verifyZeroInteractions(actionFilterChain);
verifyZeroInteractions(actionListener);
}
@@ -124,7 +124,8 @@ public class IngestProxyActionFilterTests extends ESTestCase {
ActionListener actionListener = mock(ActionListener.class);
ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
int totalNodes = randomIntBetween(1, 5);
- IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes);
+ IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
String action;
ActionRequest request;
@@ -136,7 +137,6 @@ public class IngestProxyActionFilterTests extends ESTestCase {
request = new BulkRequest().add(new IndexRequest());
}
filter.apply(task, action, request, actionListener, actionFilterChain);
- verifyZeroInteractions(transportService);
verify(actionFilterChain).proceed(any(Task.class), eq(action), same(request), same(actionListener));
verifyZeroInteractions(actionListener);
}
@@ -147,11 +147,11 @@ public class IngestProxyActionFilterTests extends ESTestCase {
ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
ActionRequest request = mock(ActionRequest.class);
int totalNodes = randomIntBetween(1, 5);
- IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes);
+ IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
String action = randomAsciiOfLengthBetween(1, 20);
filter.apply(task, action, request, actionListener, actionFilterChain);
- verifyZeroInteractions(transportService);
verify(actionFilterChain).proceed(any(Task.class), eq(action), same(request), same(actionListener));
verifyZeroInteractions(actionListener);
}
@@ -162,19 +162,31 @@ public class IngestProxyActionFilterTests extends ESTestCase {
ActionListener actionListener = mock(ActionListener.class);
ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
int totalNodes = randomIntBetween(2, 5);
- IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes);
- Answer<Void> answer = invocationOnMock -> {
- TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3];
- transportResponseHandler.handleResponse(new IndexResponse());
- return null;
- };
- doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class));
+ AtomicBoolean run = new AtomicBoolean(false);
+
+ IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes,
+ new TransportInterceptor() {
+ @Override
+ public AsyncSender interceptSender(AsyncSender sender) {
+ return new AsyncSender() {
+ @Override
+ public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
+ TransportRequestOptions options,
+ TransportResponseHandler<T> handler) {
+ assertTrue(run.compareAndSet(false, true));
+ assertTrue(node.isIngestNode());
+                            assertEquals(IndexAction.NAME, action);
+ handler.handleResponse((T) new IndexResponse());
+ }
+ };
+ }
+ });
IndexRequest indexRequest = new IndexRequest().setPipeline("_id");
filter.apply(task, IndexAction.NAME, indexRequest, actionListener, actionFilterChain);
- verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(IndexAction.NAME), same(indexRequest), any(TransportResponseHandler.class));
verifyZeroInteractions(actionFilterChain);
+ assertTrue(run.get());
verify(actionListener).onResponse(any(IndexResponse.class));
verify(actionListener, never()).onFailure(any(TransportException.class));
}
@@ -185,13 +197,24 @@ public class IngestProxyActionFilterTests extends ESTestCase {
ActionListener actionListener = mock(ActionListener.class);
ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
int totalNodes = randomIntBetween(2, 5);
- IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes);
- Answer<Void> answer = invocationOnMock -> {
- TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3];
- transportResponseHandler.handleResponse(new BulkResponse(null, -1));
- return null;
- };
- doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class));
+ AtomicBoolean run = new AtomicBoolean(false);
+ IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes,
+ new TransportInterceptor() {
+ @Override
+ public AsyncSender interceptSender(AsyncSender sender) {
+ return new AsyncSender() {
+ @Override
+ public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
+ TransportRequestOptions options,
+ TransportResponseHandler<T> handler) {
+ assertTrue(run.compareAndSet(false, true));
+ assertTrue(node.isIngestNode());
+                            assertEquals(BulkAction.NAME, action);
+ handler.handleResponse((T) new BulkResponse(null, -1));
+ }
+ };
+ }
+ });
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest().setPipeline("_id"));
@@ -200,11 +223,10 @@ public class IngestProxyActionFilterTests extends ESTestCase {
bulkRequest.add(new IndexRequest());
}
filter.apply(task, BulkAction.NAME, bulkRequest, actionListener, actionFilterChain);
-
- verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(BulkAction.NAME), same(bulkRequest), any(TransportResponseHandler.class));
verifyZeroInteractions(actionFilterChain);
verify(actionListener).onResponse(any(BulkResponse.class));
verify(actionListener, never()).onFailure(any(TransportException.class));
+ assertTrue(run.get());
}
@SuppressWarnings("unchecked")
@@ -213,30 +235,39 @@ public class IngestProxyActionFilterTests extends ESTestCase {
ActionListener actionListener = mock(ActionListener.class);
ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
int totalNodes = randomIntBetween(2, 5);
- IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes);
- Answer<Void> answer = invocationOnMock -> {
- TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3];
- transportResponseHandler.handleException(new TransportException(new IllegalArgumentException()));
- return null;
- };
- doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class));
-
- String action;
+ String requestAction;
ActionRequest request;
if (randomBoolean()) {
- action = IndexAction.NAME;
+ requestAction = IndexAction.NAME;
request = new IndexRequest().setPipeline("_id");
} else {
- action = BulkAction.NAME;
+ requestAction = BulkAction.NAME;
request = new BulkRequest().add(new IndexRequest().setPipeline("_id"));
}
-
- filter.apply(task, action, request, actionListener, actionFilterChain);
-
- verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(action), same(request), any(TransportResponseHandler.class));
+ AtomicBoolean run = new AtomicBoolean(false);
+ IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes,
+ new TransportInterceptor() {
+ @Override
+ public AsyncSender interceptSender(AsyncSender sender) {
+ return new AsyncSender() {
+ @Override
+ public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
+ TransportRequestOptions options,
+ TransportResponseHandler<T> handler) {
+ assertTrue(run.compareAndSet(false, true));
+ assertTrue(node.isIngestNode());
+                            assertEquals(requestAction, action);
+ handler.handleException(new TransportException(new IllegalArgumentException()));
+ }
+ };
+ }
+ });
+ filter.apply(task, requestAction, request, actionListener, actionFilterChain);
verifyZeroInteractions(actionFilterChain);
verify(actionListener).onFailure(any(TransportException.class));
verify(actionListener, never()).onResponse(any(TransportResponse.class));
+ assertTrue(run.get());
}
private static class IngestNodeMatcher extends CustomTypeSafeMatcher<DiscoveryNode> {
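Condensed version of the pattern these tests switch to: instead of a Mockito mock of TransportService, a real TransportService is built with a TransportInterceptor whose AsyncSender intercepts the outgoing request, asserts on it, and answers with a canned response. The sketch below mirrors the code added above:

    TransportInterceptor interceptor = new TransportInterceptor() {
        @Override
        public AsyncSender interceptSender(AsyncSender sender) {
            return new AsyncSender() {
                @Override
                @SuppressWarnings("unchecked")
                public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
                                                                      TransportRequestOptions options,
                                                                      TransportResponseHandler<T> handler) {
                    assertTrue(node.isIngestNode());                 // the request must be proxied to an ingest node
                    handler.handleResponse((T) new IndexResponse()); // canned response instead of going over the wire
                }
            };
        }
    };
    // transportService = new TransportService(Settings.EMPTY, null, null, interceptor);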
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java
index 544e2932b4..83aad26f6a 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java
@@ -27,7 +27,7 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
-import static org.elasticsearch.ingest.IngestDocumentTests.assertIngestDocument;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java
index 8cf0550981..a4320d2641 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java
@@ -34,7 +34,7 @@ import org.junit.Before;
import java.util.Collections;
import java.util.Map;
-import static org.elasticsearch.ingest.IngestDocumentTests.assertIngestDocument;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
@@ -43,6 +43,8 @@ import static org.hamcrest.Matchers.sameInstance;
public class SimulateExecutionServiceTests extends ESTestCase {
+ private final Integer version = randomBoolean() ? randomInt() : null;
+
private ThreadPool threadPool;
private SimulateExecutionService executionService;
private IngestDocument ingestDocument;
@@ -65,7 +67,7 @@ public class SimulateExecutionServiceTests extends ESTestCase {
public void testExecuteVerboseItem() throws Exception {
TestProcessor processor = new TestProcessor("test-id", "mock", ingestDocument -> {});
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor, processor));
+ Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor, processor));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, true);
assertThat(processor.getInvokedCounter(), equalTo(2));
assertThat(actualItemResponse, instanceOf(SimulateDocumentVerboseResult.class));
@@ -90,7 +92,7 @@ public class SimulateExecutionServiceTests extends ESTestCase {
public void testExecuteItem() throws Exception {
TestProcessor processor = new TestProcessor("processor_0", "mock", ingestDocument -> {});
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor, processor));
+ Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor, processor));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, false);
assertThat(processor.getInvokedCounter(), equalTo(2));
assertThat(actualItemResponse, instanceOf(SimulateDocumentBaseResult.class));
@@ -103,7 +105,7 @@ public class SimulateExecutionServiceTests extends ESTestCase {
TestProcessor processor1 = new TestProcessor("processor_0", "mock", ingestDocument -> {});
TestProcessor processor2 = new TestProcessor("processor_1", "mock", ingestDocument -> { throw new RuntimeException("processor failed"); });
TestProcessor processor3 = new TestProcessor("processor_2", "mock", ingestDocument -> {});
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor1, processor2, processor3));
+ Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor1, processor2, processor3));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, true);
assertThat(processor1.getInvokedCounter(), equalTo(1));
assertThat(processor2.getInvokedCounter(), equalTo(1));
@@ -127,7 +129,7 @@ public class SimulateExecutionServiceTests extends ESTestCase {
TestProcessor processor1 = new TestProcessor("processor_0", "mock", ingestDocument -> { throw new RuntimeException("processor failed"); });
TestProcessor processor2 = new TestProcessor("processor_1", "mock", ingestDocument -> {});
TestProcessor processor3 = new TestProcessor("processor_2", "mock", ingestDocument -> {});
- Pipeline pipeline = new Pipeline("_id", "_description",
+ Pipeline pipeline = new Pipeline("_id", "_description", version,
new CompoundProcessor(new CompoundProcessor(false, Collections.singletonList(processor1),
Collections.singletonList(processor2)), processor3));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, true);
@@ -163,7 +165,7 @@ public class SimulateExecutionServiceTests extends ESTestCase {
RuntimeException exception = new RuntimeException("processor failed");
TestProcessor testProcessor = new TestProcessor("processor_0", "mock", ingestDocument -> { throw exception; });
CompoundProcessor processor = new CompoundProcessor(true, Collections.singletonList(testProcessor), Collections.emptyList());
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor));
+ Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, true);
assertThat(testProcessor.getInvokedCounter(), equalTo(1));
assertThat(actualItemResponse, instanceOf(SimulateDocumentVerboseResult.class));
@@ -179,7 +181,7 @@ public class SimulateExecutionServiceTests extends ESTestCase {
public void testExecuteVerboseItemWithoutExceptionAndWithIgnoreFailure() throws Exception {
TestProcessor testProcessor = new TestProcessor("processor_0", "mock", ingestDocument -> { });
CompoundProcessor processor = new CompoundProcessor(true, Collections.singletonList(testProcessor), Collections.emptyList());
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor));
+ Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, true);
assertThat(testProcessor.getInvokedCounter(), equalTo(1));
assertThat(actualItemResponse, instanceOf(SimulateDocumentVerboseResult.class));
@@ -194,7 +196,7 @@ public class SimulateExecutionServiceTests extends ESTestCase {
public void testExecuteItemWithFailure() throws Exception {
TestProcessor processor = new TestProcessor(ingestDocument -> { throw new RuntimeException("processor failed"); });
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor, processor));
+ Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor, processor));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, false);
assertThat(processor.getInvokedCounter(), equalTo(1));
assertThat(actualItemResponse, instanceOf(SimulateDocumentBaseResult.class));
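The mechanical change running through this file is that `Pipeline` now takes a nullable `Integer version` between the description and the processor; the field declared at the top of the class randomizes between a concrete version and `null` so both forms are covered. As a sketch:

    // version is optional: null means the pipeline carries no version
    Integer version = randomBoolean() ? randomInt() : null;
    Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor));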
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java
index 8418c886be..ab5d30c6f9 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java
@@ -54,7 +54,7 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase {
public void init() throws IOException {
TestProcessor processor = new TestProcessor(ingestDocument -> {});
CompoundProcessor pipelineCompoundProcessor = new CompoundProcessor(processor);
- Pipeline pipeline = new Pipeline(SIMULATED_PIPELINE_ID, null, pipelineCompoundProcessor);
+ Pipeline pipeline = new Pipeline(SIMULATED_PIPELINE_ID, null, null, pipelineCompoundProcessor);
Map<String, Processor.Factory> registry =
Collections.singletonMap("mock_processor", (factories, tag, config) -> processor);
store = mock(PipelineStore.class);
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
index 2c2506308a..0966010a8f 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
@@ -20,22 +20,13 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.ingest.IngestDocument;
-import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import static org.elasticsearch.ingest.IngestDocumentTests.assertIngestDocument;
import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.CoreMatchers.nullValue;
public class SimulatePipelineRequestTests extends ESTestCase {
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java
index 485dc8934c..ad308b01bf 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java
@@ -30,7 +30,7 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-import static org.elasticsearch.ingest.IngestDocumentTests.assertIngestDocument;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.nullValue;
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java
index f6ffc03534..75d2d5834f 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java
@@ -27,7 +27,7 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
-import static org.elasticsearch.ingest.IngestDocumentTests.assertIngestDocument;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java b/core/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java
index 5df0aa1de0..bc72094558 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java
@@ -34,7 +34,7 @@ import java.util.HashMap;
import java.util.Map;
import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
-import static org.elasticsearch.ingest.IngestDocumentTests.assertIngestDocument;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
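All of the import rewrites above are one move: `assertIngestDocument` now lives in a dedicated `IngestDocumentMatcher` helper instead of being borrowed from `IngestDocumentTests`, so these test classes no longer depend on another test class. Call sites are unchanged; assuming the two-argument form these tests use:

    import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;

    // asserts the two ingest documents are equivalent
    assertIngestDocument(expectedIngestDocument, actualIngestDocument);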
diff --git a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java
index a8c550e01c..05dcb9d1f1 100644
--- a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java
@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -42,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
-import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.Matchers.equalTo;
@@ -56,11 +54,12 @@ public class MainActionTests extends ESTestCase {
public void testMainResponseSerialization() throws IOException {
final String nodeName = "node1";
final ClusterName clusterName = new ClusterName("cluster1");
+ final String clusterUUID = randomAsciiOfLengthBetween(10, 20);
final boolean available = randomBoolean();
final Version version = Version.CURRENT;
final Build build = Build.CURRENT;
- final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, build, available);
+ final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, clusterUUID, build, available);
BytesStreamOutput streamOutput = new BytesStreamOutput();
mainResponse.writeTo(streamOutput);
final MainResponse serialized = new MainResponse();
@@ -74,11 +73,21 @@ public class MainActionTests extends ESTestCase {
}
public void testMainResponseXContent() throws IOException {
- final MainResponse mainResponse = new MainResponse("node1", Version.CURRENT, new ClusterName("cluster1"), Build.CURRENT, false);
- final String expected = "{\"name\":\"node1\",\"cluster_name\":\"cluster1\",\"version\":{\"number\":\"" + Version.CURRENT.toString()
- + "\",\"build_hash\":\"" + Build.CURRENT.shortHash() + "\",\"build_date\":\"" + Build.CURRENT.date() + "\"," +
- "\"build_snapshot\":" + Build.CURRENT.isSnapshot() + ",\"lucene_version\":\"" + Version.CURRENT.luceneVersion.toString() +
- "\"},\"tagline\":\"You Know, for Search\"}";
+ String clusterUUID = randomAsciiOfLengthBetween(10, 20);
+ final MainResponse mainResponse = new MainResponse("node1", Version.CURRENT, new ClusterName("cluster1"), clusterUUID,
+ Build.CURRENT, false);
+ final String expected = "{" +
+ "\"name\":\"node1\"," +
+ "\"cluster_name\":\"cluster1\"," +
+ "\"cluster_uuid\":\"" + clusterUUID + "\"," +
+ "\"version\":{" +
+ "\"number\":\"" + Version.CURRENT.toString() + "\"," +
+ "\"build_hash\":\"" + Build.CURRENT.shortHash() + "\"," +
+ "\"build_date\":\"" + Build.CURRENT.date() + "\"," +
+ "\"build_snapshot\":" + Build.CURRENT.isSnapshot() +
+ ",\"lucene_version\":\"" + Version.CURRENT.luceneVersion.toString() +
+ "\"}," +
+ "\"tagline\":\"You Know, for Search\"}";
XContentBuilder builder = XContentFactory.jsonBuilder();
mainResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
@@ -111,7 +120,8 @@ public class MainActionTests extends ESTestCase {
ClusterState state = ClusterState.builder(clusterName).blocks(blocks).build();
when(clusterService.state()).thenReturn(state);
- TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), mock(TransportService.class),
+ TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), new TransportService(Settings.EMPTY,
+ null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR),
mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), clusterService);
AtomicReference<MainResponse> responseRef = new AtomicReference<>();
action.doExecute(new MainRequest(), new ActionListener<MainResponse>() {
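Two independent changes meet in this file: `MainResponse` now carries the cluster UUID (serialized, and rendered as `cluster_uuid` in the XContent test), and `TransportService` is constructed for real rather than mocked, presumably because the transport interceptor is now wired in its constructor. The second change repeats across most files below; the recurring setup pattern, taken straight from these hunks, is:

    TransportService transportService = new TransportService(clusterService.getSettings(), transport, threadPool,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
    transportService.start();
    transportService.acceptIncomingRequests();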
diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
index 3c9acb4104..d5b17861e0 100644
--- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
@@ -175,6 +175,6 @@ public class MultiSearchRequestTests extends ESTestCase {
IndicesQueriesRegistry registry = new IndicesQueriesRegistry();
QueryParser<MatchAllQueryBuilder> parser = MatchAllQueryBuilder::fromXContent;
registry.register(parser, MatchAllQueryBuilder.NAME);
- return new SearchRequestParsers(registry, null, null);
+ return new SearchRequestParsers(registry, null, null, null);
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/controller/SearchPhaseControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
index 33ef23db6e..2778a9dbf4 100644
--- a/core/src/test/java/org/elasticsearch/search/controller/SearchPhaseControllerTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
@@ -17,10 +17,11 @@
* under the License.
*/
-package org.elasticsearch.search.controller;
+package org.elasticsearch.action.search;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.action.search.SearchPhaseController;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.BigArrays;
diff --git a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java
index 011fb17251..9df5bc8223 100644
--- a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java
@@ -57,8 +57,12 @@ public class TransportMultiSearchActionTests extends ESTestCase {
when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
ThreadPool threadPool = new ThreadPool(settings);
TaskManager taskManager = mock(TaskManager.class);
- TransportService transportService = mock(TransportService.class);
- when(transportService.getTaskManager()).thenReturn(taskManager);
+ TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR) {
+ @Override
+ public TaskManager getTaskManager() {
+ return taskManager;
+ }
+ };
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build());
IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);
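Where a test only needs one behavior from `TransportService`, the commit swaps the Mockito mock for a real instance with a targeted override, as in the hunk above. The pattern in isolation:

    // instead of `when(mock.getTaskManager()).thenReturn(taskManager)`, build a real
    // TransportService and override only what the test needs:
    TransportService transportService = new TransportService(Settings.EMPTY, null, null,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR) {
        @Override
        public TaskManager getTaskManager() {
            return taskManager;
        }
    };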
diff --git a/core/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java b/core/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java
index eb8e9680ce..598a672fb2 100644
--- a/core/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java
@@ -146,6 +146,25 @@ public class ActiveShardCountTests extends ESTestCase {
assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName));
}
+ public void testEnoughShardsActiveValueBased() {
+ // enough shards active case
+ int threshold = randomIntBetween(1, 50);
+ ActiveShardCount waitForActiveShards = ActiveShardCount.from(randomIntBetween(0, threshold));
+ assertTrue(waitForActiveShards.enoughShardsActive(randomIntBetween(threshold, 50)));
+ // not enough shards active
+ waitForActiveShards = ActiveShardCount.from(randomIntBetween(threshold, 50));
+ assertFalse(waitForActiveShards.enoughShardsActive(randomIntBetween(0, threshold - 1)));
+ // wait for zero shards should always pass
+ assertTrue(ActiveShardCount.from(0).enoughShardsActive(randomIntBetween(0, 50)));
+ // invalid values
+ Exception e = expectThrows(IllegalStateException.class, () -> ActiveShardCount.ALL.enoughShardsActive(randomIntBetween(0, 50)));
+ assertEquals("not enough information to resolve to shard count", e.getMessage());
+ e = expectThrows(IllegalStateException.class, () -> ActiveShardCount.DEFAULT.enoughShardsActive(randomIntBetween(0, 50)));
+ assertEquals("not enough information to resolve to shard count", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> ActiveShardCount.NONE.enoughShardsActive(randomIntBetween(-10, -1)));
+ assertEquals("activeShardCount cannot be negative", e.getMessage());
+ }
+
private void runTestForOneActiveShard(final ActiveShardCount activeShardCount) {
final String indexName = "test-idx";
final int numberOfShards = randomIntBetween(1, 5);
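The new `testEnoughShardsActiveValueBased` pins down the numeric contract of `ActiveShardCount.enoughShardsActive(int)`. In sketch form:

    assertTrue(ActiveShardCount.from(2).enoughShardsActive(3));   // enough: active >= requested
    assertFalse(ActiveShardCount.from(2).enoughShardsActive(1));  // not enough
    assertTrue(ActiveShardCount.from(0).enoughShardsActive(0));   // waiting on zero always passes
    // ALL and DEFAULT carry no concrete number and throw IllegalStateException;
    // a negative active-shard count throws IllegalArgumentException.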
diff --git a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java
index 9572a2df65..8a45ca4753 100644
--- a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java
@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.test.ESTestCase;
@@ -35,10 +36,12 @@ public class AutoCreateIndexTests extends ESTestCase {
public void testParseFailed() {
try {
- new AutoCreateIndex(Settings.builder().put("action.auto_create_index", ",,,").build(), new IndexNameExpressionResolver(Settings.EMPTY));
+ Settings settings = Settings.builder().put("action.auto_create_index", ",,,").build();
+ newAutoCreateIndex(settings);
fail("initialization should have failed");
} catch (IllegalArgumentException ex) {
- assertEquals("Can't parse [,,,] for setting [action.auto_create_index] must be either [true, false, or a comma separated list of index patterns]", ex.getMessage());
+ assertEquals("Can't parse [,,,] for setting [action.auto_create_index] must be either [true, false, or a " +
+ "comma separated list of index patterns]", ex.getMessage());
}
}
@@ -46,46 +49,51 @@ public class AutoCreateIndexTests extends ESTestCase {
String prefix = randomFrom("+", "-");
Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), prefix).build();
try {
- new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings));
+ newAutoCreateIndex(settings);
fail("initialization should have failed");
} catch(IllegalArgumentException ex) {
- assertEquals("Can't parse [" + prefix + "] for setting [action.auto_create_index] must contain an index name after [" + prefix + "]", ex.getMessage());
+ assertEquals("Can't parse [" + prefix + "] for setting [action.auto_create_index] must contain an index name after ["
+ + prefix + "]", ex.getMessage());
}
}
public void testAutoCreationDisabled() {
Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false).build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY));
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(false));
}
public void testAutoCreationEnabled() {
Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), true).build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY));
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(true));
}
public void testDefaultAutoCreation() {
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(Settings.EMPTY, new IndexNameExpressionResolver(Settings.EMPTY));
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(Settings.EMPTY);
assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(true));
}
public void testExistingIndex() {
- Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, false, randomAsciiOfLengthBetween(7, 10))).build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY));
- assertThat(autoCreateIndex.shouldAutoCreate(randomFrom("index1", "index2", "index3"), buildClusterState("index1", "index2", "index3")), equalTo(false));
+ Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, false,
+ randomAsciiOfLengthBetween(7, 10))).build();
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
+ assertThat(autoCreateIndex.shouldAutoCreate(randomFrom("index1", "index2", "index3"),
+ buildClusterState("index1", "index2", "index3")), equalTo(false));
}
public void testDynamicMappingDisabled() {
- Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, randomAsciiOfLengthBetween(1, 10)))
+ Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true,
+ randomAsciiOfLengthBetween(1, 10)))
.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false).build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(Settings.EMPTY));
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
assertThat(autoCreateIndex.shouldAutoCreate(randomAsciiOfLengthBetween(1, 10), buildClusterState()), equalTo(false));
}
public void testAutoCreationPatternEnabled() {
- Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("+index*", "index*")).build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings));
+ Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("+index*", "index*"))
+ .build();
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build();
assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(true));
assertThat(autoCreateIndex.shouldAutoCreate("does_not_match" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false));
@@ -93,7 +101,7 @@ public class AutoCreateIndexTests extends ESTestCase {
public void testAutoCreationPatternDisabled() {
Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "-index*").build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings));
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build();
assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false));
//default is false when patterns are specified
@@ -101,8 +109,9 @@ public class AutoCreateIndexTests extends ESTestCase {
}
public void testAutoCreationMultiplePatternsWithWildcards() {
- Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("+test*,-index*", "test*,-index*")).build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings));
+ Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(),
+ randomFrom("+test*,-index*", "test*,-index*")).build();
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build();
assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false));
assertThat(autoCreateIndex.shouldAutoCreate("test" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(true));
@@ -111,7 +120,7 @@ public class AutoCreateIndexTests extends ESTestCase {
public void testAutoCreationMultiplePatternsNoWildcards() {
Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "+test1,-index1").build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings));
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build();
assertThat(autoCreateIndex.shouldAutoCreate("test1", clusterState), equalTo(true));
assertThat(autoCreateIndex.shouldAutoCreate("index" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false));
@@ -121,7 +130,7 @@ public class AutoCreateIndexTests extends ESTestCase {
public void testAutoCreationMultipleIndexNames() {
Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "test1,test2").build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings));
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build();
assertThat(autoCreateIndex.shouldAutoCreate("test1", clusterState), equalTo(true));
assertThat(autoCreateIndex.shouldAutoCreate("test2", clusterState), equalTo(true));
@@ -129,19 +138,51 @@ public class AutoCreateIndexTests extends ESTestCase {
}
public void testAutoCreationConflictingPatternsFirstWins() {
- Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "+test1,-test1,-test2,+test2").build();
- AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new IndexNameExpressionResolver(settings));
+ Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(),
+ "+test1,-test1,-test2,+test2").build();
+ AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()).build();
assertThat(autoCreateIndex.shouldAutoCreate("test1", clusterState), equalTo(true));
assertThat(autoCreateIndex.shouldAutoCreate("test2", clusterState), equalTo(false));
assertThat(autoCreateIndex.shouldAutoCreate("does_not_match" + randomAsciiOfLengthBetween(1, 5), clusterState), equalTo(false));
}
+ public void testUpdate() {
+ boolean value = randomBoolean();
+ Settings settings;
+ if (value && randomBoolean()) {
+ settings = Settings.EMPTY;
+ } else {
+ settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), value).build();
+ }
+
+ ClusterSettings clusterSettings = new ClusterSettings(settings,
+ ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, clusterSettings, new IndexNameExpressionResolver(settings));
+ assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(value));
+
+ Settings newSettings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), !value).build();
+ clusterSettings.applySettings(newSettings);
+ assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(!value));
+
+ newSettings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), "logs-*").build();
+ clusterSettings.applySettings(newSettings);
+ assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(true));
+ assertThat(autoCreateIndex.getAutoCreate().getExpressions().size(), equalTo(1));
+ assertThat(autoCreateIndex.getAutoCreate().getExpressions().get(0).v1(), equalTo("logs-*"));
+ }
+
private static ClusterState buildClusterState(String... indices) {
MetaData.Builder metaData = MetaData.builder();
for (String index : indices) {
metaData.put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1));
}
- return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).build();
+ return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+ .metaData(metaData).build();
+ }
+
+ private AutoCreateIndex newAutoCreateIndex(Settings settings) {
+ return new AutoCreateIndex(settings, new ClusterSettings(settings,
+ ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new IndexNameExpressionResolver(settings));
}
}
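`AutoCreateIndex` now takes a `ClusterSettings` argument, which is what makes `action.auto_create_index` dynamically updatable: `testUpdate` flips the setting through `clusterSettings.applySettings(...)` and observes the change through the new `getAutoCreate()` accessor. On the production side this implies the constructor registers a settings-update consumer; a hedged sketch of that wiring (`setAutoCreate` is a hypothetical name, not confirmed by this diff):

    // inside AutoCreateIndex's constructor (sketch; the consumer name is assumed):
    clusterSettings.addSettingsUpdateConsumer(AUTO_CREATE_INDEX_SETTING, this::setAutoCreate);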
diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
index 7b23738303..a249a0e98e 100644
--- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
@@ -191,7 +191,8 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
- final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
+ final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
setClusterState(clusterService, TEST_INDEX);
diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java
index b30a343547..87f86c3f59 100644
--- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java
+++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java
@@ -23,7 +23,7 @@ import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
index 9aeafcac0e..a7db99cc20 100644
--- a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
@@ -87,7 +87,7 @@ public class TransportMasterNodeActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(threadPool);
- transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
+ transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
localNode = new DiscoveryNode("local_node", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
diff --git a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java
index 744a116f4a..67cc64cb87 100644
--- a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java
@@ -177,7 +177,8 @@ public class TransportNodesActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
- transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
+ transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
int numNodes = randomIntBetween(3, 10);
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
index b5b2cbeb73..2d098a065b 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
@@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.support.replication;
-import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.ShardOperationFailedException;
@@ -92,7 +91,7 @@ public class BroadcastReplicationTests extends ESTestCase {
super.setUp();
LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, new NamedWriteableRegistry(Collections.emptyList()), circuitBreakerService);
clusterService = createClusterService(threadPool);
- transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
+ transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), null);
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
index 813e4f630c..646ad23a48 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
@@ -115,7 +115,7 @@ public class ClusterStateCreationUtils {
} else {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true,
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, true,
primaryState, unassignedInfo));
for (ShardRoutingState replicaState : replicaStates) {
@@ -132,7 +132,7 @@ public class ClusterStateCreationUtils {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
indexShardRoutingBuilder.addShard(
- TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState,
+ TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState,
unassignedInfo));
}
@@ -170,10 +170,10 @@ public class ClusterStateCreationUtils {
routing.addAsNew(indexMetaData);
final ShardId shardId = new ShardId(index, "_na_", i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, null, true,
- ShardRoutingState.STARTED, null));
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).getId(), null, null, false,
- ShardRoutingState.STARTED, null));
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, true,
+ ShardRoutingState.STARTED));
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).getId(), null, false,
+ ShardRoutingState.STARTED));
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
}
state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java
index 2e440d921e..b5edc1b53c 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.support.replication;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
@@ -26,14 +27,16 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.shard.IndexShardNotStartedException;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
@@ -69,7 +72,8 @@ public class ReplicationOperationTests extends ESTestCase {
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
- final long primaryTerm = state.getMetaData().index(index).primaryTerm(0);
+ IndexMetaData indexMetaData = state.getMetaData().index(index);
+ final long primaryTerm = indexMetaData.primaryTerm(0);
final IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId);
ShardRouting primaryShard = indexShardRoutingTable.primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
@@ -78,6 +82,10 @@ public class ReplicationOperationTests extends ESTestCase {
.nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
primaryShard = primaryShard.getTargetRelocatingShard();
}
+ // add a few in-sync allocation ids that don't have corresponding routing entries
+ Set<String> staleAllocationIds = Sets.newHashSet(generateRandomStringArray(4, 10, false));
+ state = ClusterState.builder(state).metaData(MetaData.builder(state.metaData()).put(IndexMetaData.builder(indexMetaData)
+ .putInSyncAllocationIds(0, Sets.union(indexMetaData.inSyncAllocationIds(0), staleAllocationIds)))).build();
final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, state);
@@ -112,6 +120,7 @@ public class ReplicationOperationTests extends ESTestCase {
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
assertThat(request.processedOnReplicas, equalTo(expectedReplicas));
assertThat(replicasProxy.failedReplicas, equalTo(expectedFailedShards));
+ assertThat(replicasProxy.markedAsStaleCopies, equalTo(staleAllocationIds));
assertTrue("listener is not marked as done", listener.isDone());
ShardInfo shardInfo = listener.actionGet().getShardInfo();
assertThat(shardInfo.getFailed(), equalTo(expectedFailedShards.size()));
@@ -155,7 +164,8 @@ public class ReplicationOperationTests extends ESTestCase {
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = stateWithActivePrimary(index, true, 1 + randomInt(2), randomInt(2));
- final long primaryTerm = state.getMetaData().index(index).primaryTerm(0);
+ IndexMetaData indexMetaData = state.getMetaData().index(index);
+ final long primaryTerm = indexMetaData.primaryTerm(0);
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
@@ -163,6 +173,10 @@ public class ReplicationOperationTests extends ESTestCase {
.nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
primaryShard = primaryShard.getTargetRelocatingShard();
}
+ // add in-sync allocation id that doesn't have a corresponding routing entry
+ state = ClusterState.builder(state).metaData(MetaData.builder(state.metaData()).put(IndexMetaData.builder(indexMetaData)
+ .putInSyncAllocationIds(0, Sets.union(indexMetaData.inSyncAllocationIds(0), Sets.newHashSet(randomAsciiOfLength(10))))))
+ .build();
final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, state);
@@ -173,13 +187,28 @@ public class ReplicationOperationTests extends ESTestCase {
Request request = new Request(shardId);
PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
final ClusterState finalState = state;
+ final boolean testPrimaryDemotedOnStaleShardCopies = randomBoolean();
final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) {
@Override
public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception,
Runnable onSuccess, Consumer<Exception> onPrimaryDemoted,
Consumer<Exception> onIgnoredFailure) {
- assertThat(replica, equalTo(failedReplica));
- onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
+ if (testPrimaryDemotedOnStaleShardCopies) {
+ super.failShard(replica, primaryTerm, message, exception, onSuccess, onPrimaryDemoted, onIgnoredFailure);
+ } else {
+ assertThat(replica, equalTo(failedReplica));
+ onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
+ }
+ }
+
+ @Override
+ public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
+ Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+ if (testPrimaryDemotedOnStaleShardCopies) {
+ onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
+ } else {
+ super.markShardCopyAsStale(shardId, allocationId, primaryTerm, onSuccess, onPrimaryDemoted, onIgnoredFailure);
+ }
}
};
AtomicBoolean primaryFailed = new AtomicBoolean();
@@ -403,6 +432,8 @@ public class ReplicationOperationTests extends ESTestCase {
final Set<ShardRouting> failedReplicas = ConcurrentCollections.newConcurrentSet();
+ final Set<String> markedAsStaleCopies = ConcurrentCollections.newConcurrentSet();
+
TestReplicaProxy() {
this(Collections.emptyMap());
}
@@ -437,6 +468,19 @@ public class ReplicationOperationTests extends ESTestCase {
fail("replica [" + replica + "] was failed");
}
}
+
+ @Override
+ public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
+ Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+ if (markedAsStaleCopies.add(allocationId) == false) {
+ fail("replica [" + allocationId + "] was marked as stale twice");
+ }
+ if (randomBoolean()) {
+ onSuccess.run();
+ } else {
+ onIgnoredFailure.accept(new ElasticsearchException("simulated"));
+ }
+ }
}
class TestReplicationOperation extends ReplicationOperation<Request, Request, TestPrimary.Result> {
@@ -447,7 +491,7 @@ public class ReplicationOperationTests extends ESTestCase {
public TestReplicationOperation(Request request, Primary<Request, Request, TestPrimary.Result> primary,
ActionListener<TestPrimary.Result> listener, boolean executeOnReplicas,
- Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier, ESLogger logger, String opType) {
+ Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
super(request, primary, listener, executeOnReplicas, replicas, clusterStateSupplier, logger, opType);
}
}
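The other substantive change in this file is the new `markShardCopyAsStale` hook on the replicas proxy: in-sync allocation ids that have no corresponding routing entry (the ids unioned into the cluster state above) must each be reported exactly once, following the same three-callback contract as `failShard`. A sketch of an implementation honoring that contract (`primaryHasBeenDemoted` is a hypothetical condition, not from this diff):

    @Override
    public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
                                     Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
        if (primaryHasBeenDemoted) {
            onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
        } else {
            onSuccess.run(); // the stale copy has been removed from the in-sync set
        }
    }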
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
index 6b1d0dd088..f1e51d6224 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -36,32 +37,36 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
+import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
+import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseOptions;
import org.elasticsearch.transport.TransportService;
@@ -75,12 +80,12 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
+import java.util.Locale;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Consumer;
import java.util.stream.Collectors;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
@@ -93,18 +98,40 @@ import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TransportReplicationActionTests extends ESTestCase {
+ /**
+ * Takes a request that was sent by a {@link TransportReplicationAction} and captured, and returns the
+ * underlying request if it is wrapped, or the original request (cast to the expected type).
+ *
+ * This will throw a {@link ClassCastException} if the request is of the wrong type.
+ */
+ public static <R extends ReplicationRequest> R resolveRequest(TransportRequest requestOrWrappedRequest) {
+ if (requestOrWrappedRequest instanceof TransportReplicationAction.ConcreteShardRequest) {
+ requestOrWrappedRequest = ((TransportReplicationAction.ConcreteShardRequest<?>)requestOrWrappedRequest).getRequest();
+ }
+ return (R) requestOrWrappedRequest;
+ }
+
private static ThreadPool threadPool;
private ClusterService clusterService;
private TransportService transportService;
private CapturingTransport transport;
private Action action;
+ private ShardStateAction shardStateAction;
+
/**
* TransportReplicationAction needs an instance of IndexShard to count operations.
* indexShards is reset to null before each test and will be initialized upon request in the tests.
@@ -121,10 +148,12 @@ public class TransportReplicationActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(threadPool);
- transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
+ transportService = new TransportService(clusterService.getSettings(), transport, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
- action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool);
+ shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool);
+ action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, shardStateAction, threadPool);
}
@After
@@ -169,12 +198,14 @@ public class TransportReplicationActionTests extends ESTestCase {
reroutePhase.run();
assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class);
assertPhase(task, "failed");
+ assertFalse(request.isRetrySet.get());
listener = new PlainActionFuture<>();
- reroutePhase = action.new ReroutePhase(task, new Request(), listener);
+ reroutePhase = action.new ReroutePhase(task, request = new Request(), listener);
reroutePhase.run();
assertFalse("primary phase should wait on retryable block", listener.isDone());
assertPhase(task, "waiting_for_retry");
+ assertTrue(request.isRetrySet.get());
block = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
@@ -204,6 +235,7 @@ public class TransportReplicationActionTests extends ESTestCase {
reroutePhase.run();
assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class);
assertPhase(task, "failed");
+ assertTrue(request.isRetrySet.get());
request = new Request(shardId);
listener = new PlainActionFuture<>();
@@ -211,6 +243,7 @@ public class TransportReplicationActionTests extends ESTestCase {
reroutePhase.run();
assertFalse("unassigned primary didn't cause a retry", listener.isDone());
assertPhase(task, "waiting_for_retry");
+ assertTrue(request.isRetrySet.get());
setState(clusterService, state(index, true, ShardRoutingState.STARTED));
logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint());
@@ -249,19 +282,20 @@ public class TransportReplicationActionTests extends ESTestCase {
Action.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase.run();
assertListenerThrows("cluster state too old didn't cause a timeout", listener, UnavailableShardsException.class);
+ assertTrue(request.isRetrySet.compareAndSet(true, false));
request = new Request(shardId).routedBasedOnClusterVersion(clusterService.state().version() + 1);
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase.run();
assertFalse("cluster state too old didn't cause a retry", listener.isDone());
+ assertTrue(request.isRetrySet.get());
// finish relocation
ShardRouting relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId)
.shardsWithState(ShardRoutingState.INITIALIZING).get(0);
AllocationService allocationService = ESAllocationTestCase.createAllocationService();
- RoutingAllocation.Result result = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget));
- ClusterState updatedState = ClusterState.builder(clusterService.state()).routingResult(result).build();
+ ClusterState updatedState = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget));
setState(clusterService, updatedState);
logger.debug("--> relocation complete state:\n{}", clusterService.state().prettyPrint());
@@ -290,11 +324,14 @@ public class TransportReplicationActionTests extends ESTestCase {
reroutePhase.run();
assertListenerThrows("must throw index not found exception", listener, IndexNotFoundException.class);
assertPhase(task, "failed");
+ assertTrue(request.isRetrySet.get());
request = new Request(new ShardId(index, "_na_", 10)).timeout("1ms");
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase.run();
assertListenerThrows("must throw shard not found exception", listener, ShardNotFoundException.class);
+ assertFalse(request.isRetrySet.get()); // TODO: I'd have expected this to be true, but we fail too early?
}
public void testStalePrimaryShardOnReroute() throws InterruptedException {
@@ -319,6 +356,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertThat(capturedRequests, arrayWithSize(1));
assertThat(capturedRequests[0].action, equalTo("testAction[p]"));
assertPhase(task, "waiting_on_primary");
+ assertFalse(request.isRetrySet.get());
transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId));
@@ -380,6 +418,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertThat(capturedRequests.get(0).action, equalTo("testAction"));
assertPhase(task, "rerouted");
}
+ assertFalse(request.isRetrySet.get());
assertIndexShardUninitialized();
}
@@ -400,7 +439,7 @@ public class TransportReplicationActionTests extends ESTestCase {
isRelocated.set(true);
executeOnPrimary = false;
}
- action.new AsyncPrimaryAction(request, createTransportChannel(listener), task) {
+ action.new AsyncPrimaryAction(request, primaryShard.allocationId().getId(), createTransportChannel(listener), task) {
@Override
protected ReplicationOperation<Request, Request, Action.PrimaryResult> createReplicatedOperation(Request request,
ActionListener<Action.PrimaryResult> actionListener, Action.PrimaryShardReference primaryShardReference,
@@ -419,6 +458,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertTrue(listener.isDone());
listener.get();
assertPhase(task, "finished");
+ assertFalse(request.isRetrySet.get());
} else {
assertFalse(executed.get());
assertIndexShardCounter(0); // it should have been freed.
@@ -432,6 +472,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertTrue(listener.isDone());
listener.get();
assertPhase(task, "finished");
+ assertFalse(request.isRetrySet.get());
}
}
@@ -439,7 +480,8 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = state(index, true, ShardRoutingState.RELOCATING);
- String primaryTargetNodeId = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId();
+ final ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
+ String primaryTargetNodeId = primaryShard.relocatingNodeId();
// simulate execution of the primary phase on the relocation target node
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryTargetNodeId)).build();
setState(clusterService, state);
@@ -447,7 +489,7 @@ public class TransportReplicationActionTests extends ESTestCase {
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
AtomicBoolean executed = new AtomicBoolean();
- action.new AsyncPrimaryAction(request, createTransportChannel(listener), task) {
+ action.new AsyncPrimaryAction(request, primaryShard.allocationId().getRelocationId(), createTransportChannel(listener), task) {
@Override
protected ReplicationOperation<Request, Request, Action.PrimaryResult> createReplicatedOperation(Request request,
ActionListener<Action.PrimaryResult> actionListener, Action.PrimaryShardReference primaryShardReference,
@@ -460,9 +502,15 @@ public class TransportReplicationActionTests extends ESTestCase {
}
};
}
+
+ @Override
+ public void onFailure(Exception e) {
+ throw new RuntimeException(e);
+ }
}.run();
assertThat(executed.get(), equalTo(true));
assertPhase(task, "finished");
+ assertFalse(request.isRetrySet.get());
}
public void testPrimaryReference() throws Exception {
@@ -582,7 +630,9 @@ public class TransportReplicationActionTests extends ESTestCase {
state = ClusterState.builder(state).metaData(metaData).build();
setState(clusterService, state);
AtomicBoolean executed = new AtomicBoolean();
- action.new AsyncPrimaryAction(new Request(shardId), createTransportChannel(new PlainActionFuture<>()), null) {
+ ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard();
+ action.new AsyncPrimaryAction(new Request(shardId), primaryShard.allocationId().getId(),
+ createTransportChannel(new PlainActionFuture<>()), null) {
@Override
protected ReplicationOperation<Request, Request, Action.PrimaryResult> createReplicatedOperation(Request request,
ActionListener<Action.PrimaryResult> actionListener, Action.PrimaryShardReference primaryShardReference,
@@ -599,8 +649,10 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
// no replica, we only want to test on the primary
- setState(clusterService, state(index, true, ShardRoutingState.STARTED));
+ final ClusterState state = state(index, true, ShardRoutingState.STARTED);
+ setState(clusterService, state);
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+ final ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard();
Request request = new Request(shardId);
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
@@ -608,7 +660,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final boolean throwExceptionOnCreation = i == 1;
final boolean throwExceptionOnRun = i == 2;
final boolean respondWithError = i == 3;
- action.new AsyncPrimaryAction(request, createTransportChannel(listener), task) {
+ action.new AsyncPrimaryAction(request, primaryShard.allocationId().getId(), createTransportChannel(listener), task) {
@Override
protected ReplicationOperation<Request, Request, Action.PrimaryResult> createReplicatedOperation(Request request,
ActionListener<Action.PrimaryResult> actionListener, Action.PrimaryShardReference primaryShardReference,
@@ -652,11 +704,13 @@ public class TransportReplicationActionTests extends ESTestCase {
public void testReplicasCounter() throws Exception {
final ShardId shardId = new ShardId("test", "_na_", 0);
- setState(clusterService, state(shardId.getIndexName(), true,
- ShardRoutingState.STARTED, ShardRoutingState.STARTED));
+ final ClusterState state = state(shardId.getIndexName(), true, ShardRoutingState.STARTED, ShardRoutingState.STARTED);
+ setState(clusterService, state);
+ final ShardRouting replicaRouting = state.getRoutingTable().shardRoutingTable(shardId).replicaShards().get(0);
boolean throwException = randomBoolean();
final ReplicationTask task = maybeTask();
- Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) {
+ Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction,
+ threadPool) {
@Override
protected ReplicaResult shardOperationOnReplica(Request request) {
assertIndexShardCounter(1);
@@ -669,7 +723,9 @@ public class TransportReplicationActionTests extends ESTestCase {
};
final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler();
try {
- replicaOperationTransportHandler.messageReceived(new Request().setShardId(shardId),
+ replicaOperationTransportHandler.messageReceived(
+ new TransportReplicationAction.ConcreteShardRequest<>(
+ new Request().setShardId(shardId), replicaRouting.allocationId().getId()),
createTransportChannel(new PlainActionFuture<>()), task);
} catch (ElasticsearchException e) {
assertThat(e.getMessage(), containsString("simulated"));
@@ -711,6 +767,112 @@ public class TransportReplicationActionTests extends ESTestCase {
assertEquals(ActiveShardCount.from(requestWaitForActiveShards), request.waitForActiveShards());
}
+ /** test that a primary request is rejected if it arrives at a shard with a wrong allocation id */
+ public void testPrimaryActionRejectsWrongAid() throws Exception {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, "_na_", 0);
+ setState(clusterService, state(index, true, ShardRoutingState.STARTED));
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+ Request request = new Request(shardId).timeout("1ms");
+ action.new PrimaryOperationTransportHandler().messageReceived(
+ new TransportReplicationAction.ConcreteShardRequest<>(request, "_not_a_valid_aid_"),
+ createTransportChannel(listener), maybeTask()
+ );
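+ // a stale allocation id must fail the operation with a retryable exception,
+ // so that the reroute phase can retry against a fresh cluster state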
+ try {
+ listener.get();
+ fail("using a wrong aid didn't fail the operation");
+ } catch (ExecutionException execException) {
+ Throwable throwable = execException.getCause();
+ logger.debug("got exception:" , throwable);
+ assertTrue(throwable.getClass() + " is not a retry exception", action.retryPrimaryException(throwable));
+ }
+ }
+
+ /** test that a replica request is rejected if it arrives at a shard with a wrong allocation id */
+ public void testReplicaActionRejectsWrongAid() throws Exception {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, "_na_", 0);
+ ClusterState state = state(index, false, ShardRoutingState.STARTED, ShardRoutingState.STARTED);
+ final ShardRouting replica = state.routingTable().shardRoutingTable(shardId).replicaShards().get(0);
+ // simulate execution on the node holding the replica
+ state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(replica.currentNodeId())).build();
+ setState(clusterService, state);
+
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+ Request request = new Request(shardId).timeout("1ms");
+ action.new ReplicaOperationTransportHandler().messageReceived(
+ new TransportReplicationAction.ConcreteShardRequest<>(request, "_not_a_valid_aid_"),
+ createTransportChannel(listener), maybeTask()
+ );
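+ // the rejection must be retryable and must name the offending allocation id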
+ try {
+ listener.get();
+ fail("using a wrong aid didn't fail the operation");
+ } catch (ExecutionException execException) {
+ Throwable throwable = execException.getCause();
+ if (action.retryPrimaryException(throwable) == false) {
+ throw new AssertionError("thrown exception is not retriable", throwable);
+ }
+ assertThat(throwable.getMessage(), containsString("_not_a_valid_aid_"));
+ }
+ }
+
+ /**
+ * test that throwing a {@link org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException}
+ * causes a retry
+ */
+ public void testRetryOnReplica() throws Exception {
+ final ShardId shardId = new ShardId("test", "_na_", 0);
+ ClusterState state = state(shardId.getIndexName(), true, ShardRoutingState.STARTED, ShardRoutingState.STARTED);
+ final ShardRouting replica = state.getRoutingTable().shardRoutingTable(shardId).replicaShards().get(0);
+ // simulate execution on the node holding the replica
+ state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(replica.currentNodeId())).build();
+ setState(clusterService, state);
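+ // the first execution throws RetryOnReplicaException; the handler should wait for a new
+ // cluster state instead of failing the request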
+ AtomicBoolean throwException = new AtomicBoolean(true);
+ final ReplicationTask task = maybeTask();
+ Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction,
+ threadPool) {
+ @Override
+ protected ReplicaResult shardOperationOnReplica(Request request) {
+ assertPhase(task, "replica");
+ if (throwException.get()) {
+ throw new RetryOnReplicaException(shardId, "simulation");
+ }
+ return new ReplicaResult();
+ }
+ };
+ final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler();
+ final PlainActionFuture<Response> listener = new PlainActionFuture<>();
+ final Request request = new Request().setShardId(shardId);
+ request.primaryTerm(state.metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id()));
+ replicaOperationTransportHandler.messageReceived(
+ new TransportReplicationAction.ConcreteShardRequest<>(request, replica.allocationId().getId()),
+ createTransportChannel(listener), task);
+ if (listener.isDone()) {
+ listener.get(); // fail with the exception if there
+ fail("listener shouldn't be done");
+ }
+
+ // no retry yet
+ List<CapturingTransport.CapturedRequest> capturedRequests =
+ transport.getCapturedRequestsByTargetNodeAndClear().get(replica.currentNodeId());
+ assertThat(capturedRequests, nullValue());
+
+ // release the waiting
+ throwException.set(false);
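+ // re-publishing the cluster state wakes the observer and reruns the replica operation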
+ setState(clusterService, state);
+
+ capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear().get(replica.currentNodeId());
+ assertThat(capturedRequests, notNullValue());
+ assertThat(capturedRequests.size(), equalTo(1));
+ final CapturingTransport.CapturedRequest capturedRequest = capturedRequests.get(0);
+ assertThat(capturedRequest.action, equalTo("testActionWithExceptions[r]"));
+ assertThat(capturedRequest.request, instanceOf(TransportReplicationAction.ConcreteShardRequest.class));
+ assertThat(((TransportReplicationAction.ConcreteShardRequest<?>) capturedRequest.request).getRequest(), equalTo(request));
+ assertThat(((TransportReplicationAction.ConcreteShardRequest<?>) capturedRequest.request).getTargetAllocationID(),
+ equalTo(replica.allocationId().getId()));
+ }
+
private void assertIndexShardCounter(int expected) {
assertThat(count.get(), equalTo(expected));
}
@@ -745,6 +907,7 @@ public class TransportReplicationActionTests extends ESTestCase {
public static class Request extends ReplicationRequest<Request> {
public AtomicBoolean processedOnPrimary = new AtomicBoolean();
public AtomicInteger processedOnReplicas = new AtomicInteger();
+ public AtomicBoolean isRetrySet = new AtomicBoolean(false);
public Request() {
}
@@ -766,6 +929,12 @@ public class TransportReplicationActionTests extends ESTestCase {
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
+
+ @Override
+ public void onRetry() {
+ super.onRetry();
+ isRetrySet.set(true);
+ }
}
static class Response extends ReplicationResponse {
@@ -775,9 +944,10 @@ public class TransportReplicationActionTests extends ESTestCase {
Action(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService,
+ ShardStateAction shardStateAction,
ThreadPool threadPool) {
- super(settings, actionName, transportService, clusterService, null, threadPool,
- new ShardStateAction(settings, clusterService, transportService, null, null, threadPool),
+ super(settings, actionName, transportService, clusterService, mockIndicesService(clusterService), threadPool,
+ shardStateAction,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
Request::new, Request::new, ThreadPool.Names.SAME);
}
@@ -804,43 +974,76 @@ public class TransportReplicationActionTests extends ESTestCase {
protected boolean resolveIndex() {
return false;
}
+ }
- @Override
- protected void acquirePrimaryShardReference(ShardId shardId, ActionListener<PrimaryShardReference> onReferenceAcquired) {
- count.incrementAndGet();
- PrimaryShardReference primaryShardReference = new PrimaryShardReference(null, null) {
- @Override
- public boolean isRelocated() {
- return isRelocated.get();
- }
-
- @Override
- public void failShard(String reason, @Nullable Exception e) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public ShardRouting routingEntry() {
- ShardRouting shardRouting = clusterService.state().getRoutingTable()
- .shardRoutingTable(shardId).primaryShard();
- assert shardRouting != null;
- return shardRouting;
- }
-
- @Override
- public void close() {
- count.decrementAndGet();
- }
- };
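+ /**
+ * a mocked {@link IndicesService} that resolves {@link IndexService} instances from the
+ * current cluster state, mimicking the lookups the real service performs
+ */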
+ final IndicesService mockIndicesService(ClusterService clusterService) {
+ final IndicesService indicesService = mock(IndicesService.class);
+ when(indicesService.indexServiceSafe(any(Index.class))).then(invocation -> {
+ Index index = (Index) invocation.getArguments()[0];
+ final ClusterState state = clusterService.state();
+ final IndexMetaData indexSafe = state.metaData().getIndexSafe(index);
+ return mockIndexService(indexSafe, clusterService);
+ });
+ when(indicesService.indexService(any(Index.class))).then(invocation -> {
+ Index index = (Index) invocation.getArguments()[0];
+ final ClusterState state = clusterService.state();
+ if (state.metaData().hasIndex(index.getName())) {
+ final IndexMetaData indexSafe = state.metaData().getIndexSafe(index);
+ return mockIndexService(indexSafe, clusterService);
+ } else {
+ return null;
+ }
+ });
+ return indicesService;
+ }
- onReferenceAcquired.onResponse(primaryShardReference);
- }
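+ /**
+ * a mocked {@link IndexService} that hands out {@link IndexShard} mocks for in-range shard
+ * ids and throws {@link ShardNotFoundException} otherwise
+ */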
+ final IndexService mockIndexService(final IndexMetaData indexMetaData, ClusterService clusterService) {
+ final IndexService indexService = mock(IndexService.class);
+ when(indexService.getShard(anyInt())).then(invocation -> {
+ int shard = (Integer) invocation.getArguments()[0];
+ final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
+ if (shard >= indexMetaData.getNumberOfShards()) { // shard ids are zero-based
+ throw new ShardNotFoundException(shardId);
+ }
+ return mockIndexShard(shardId, clusterService);
+ });
+ return indexService;
+ }
- @Override
- protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, ActionListener<Releasable> onLockAcquired) {
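+ /**
+ * a mocked {@link IndexShard} that tracks operation locks via the {@code count} counter,
+ * derives its routing entry and primary term from the cluster state and rejects replica
+ * operations with a stale primary term
+ */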
+ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) {
+ final IndexShard indexShard = mock(IndexShard.class);
+ doAnswer(invocation -> {
+ ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[0];
count.incrementAndGet();
- onLockAcquired.onResponse(count::decrementAndGet);
- }
+ callback.onResponse(count::decrementAndGet);
+ return null;
+ }).when(indexShard).acquirePrimaryOperationLock(any(ActionListener.class), anyString());
+ doAnswer(invocation -> {
+ long term = (Long) invocation.getArguments()[0];
+ ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[1];
+ final long primaryTerm = indexShard.getPrimaryTerm();
+ if (term < primaryTerm) {
+ throw new IllegalArgumentException(String.format(Locale.ROOT, "%s operation term [%d] is too old (current [%d])",
+ shardId, term, primaryTerm));
+ }
+ count.incrementAndGet();
+ callback.onResponse(count::decrementAndGet);
+ return null;
+ }).when(indexShard).acquireReplicaOperationLock(anyLong(), any(ActionListener.class), anyString());
+ when(indexShard.routingEntry()).thenAnswer(invocationOnMock -> {
+ final ClusterState state = clusterService.state();
+ final RoutingNode node = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
+ final ShardRouting routing = node.getByShardId(shardId);
+ if (routing == null) {
+ throw new ShardNotFoundException(shardId, "shard is no longer assigned to current node");
+ }
+ return routing;
+ });
+ when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? IndexShardState.RELOCATED : IndexShardState.STARTED);
+ doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class));
+ when(indexShard.getPrimaryTerm()).thenAnswer(i ->
+ clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id()));
+ return indexShard;
}
class NoopReplicationOperation extends ReplicationOperation<Request, Request, Action.PrimaryResult> {
@@ -858,11 +1061,6 @@ public class TransportReplicationActionTests extends ESTestCase {
* Transport channel that is needed for replica operation testing.
*/
public TransportChannel createTransportChannel(final PlainActionFuture<Response> listener) {
- return createTransportChannel(listener, error -> {
- });
- }
-
- public TransportChannel createTransportChannel(final PlainActionFuture<Response> listener, Consumer<Throwable> consumer) {
return new TransportChannel() {
@Override
@@ -887,7 +1085,6 @@ public class TransportReplicationActionTests extends ESTestCase {
@Override
public void sendResponse(Exception exception) throws IOException {
- consumer.accept(exception);
listener.onFailure(exception);
}
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTestHelper.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTestHelper.java
new file mode 100644
index 0000000000..7e02d82460
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTestHelper.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.replication;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.translog.Translog;
+
+import java.util.concurrent.CountDownLatch;
+
+public abstract class TransportWriteActionTestHelper {
+
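+ /**
+ * runs the asynchronous post-write actions for the given request and blocks until they
+ * complete, rethrowing any failure as an {@link AssertionError}
+ */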
+ public static void performPostWriteActions(final IndexShard indexShard,
+ final WriteRequest<?> request,
+ @Nullable final Translog.Location location,
+ final Logger logger) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ TransportWriteAction.RespondingWriteResult writerResult = new TransportWriteAction.RespondingWriteResult() {
+ @Override
+ public void onSuccess(boolean forcedRefresh) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Exception ex) {
+ throw new AssertionError(ex);
+ }
+ };
+ new TransportWriteAction.AsyncAfterWriteAction(indexShard, request, location, writerResult, logger).run();
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java
index 80e689743f..a554ca53d9 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java
@@ -130,7 +130,8 @@ public class TransportWriteActionTests extends ESTestCase {
private class TestAction extends TransportWriteAction<TestRequest, TestResponse> {
protected TestAction() {
- super(Settings.EMPTY, "test", mock(TransportService.class), null, null, null, null, new ActionFilters(new HashSet<>()),
+ super(Settings.EMPTY, "test", new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR),
+ null, null, null, null, new ActionFilters(new HashSet<>()),
new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME);
}
diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
index 37abc4d5ee..1d73606056 100644
--- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
@@ -142,7 +142,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
- transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
+ transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
action = new TestTransportInstanceSingleOperationAction(
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java
index 1bba4cac3d..05e30d7e2d 100644
--- a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java
@@ -20,14 +20,14 @@
package org.elasticsearch.action.termvectors;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
index a98433a100..cb27a527f6 100644
--- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
@@ -48,51 +48,57 @@ public class UpdateRequestTests extends ESTestCase {
public void testUpdateRequest() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type", "1");
// simple script
- request.source(XContentFactory.jsonBuilder().startObject()
+ request.fromXContent(XContentFactory.jsonBuilder().startObject()
.field("script", "script1")
.endObject());
Script script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
assertThat(script.getType(), equalTo(ScriptType.INLINE));
- assertThat(script.getLang(), nullValue());
+ assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
Map<String, Object> params = script.getParams();
assertThat(params, nullValue());
// simple verbose script
- request.source(XContentFactory.jsonBuilder().startObject()
+ request.fromXContent(XContentFactory.jsonBuilder().startObject()
.startObject("script").field("inline", "script1").endObject()
.endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
assertThat(script.getType(), equalTo(ScriptType.INLINE));
- assertThat(script.getLang(), nullValue());
+ assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
params = script.getParams();
assertThat(params, nullValue());
// script with params
request = new UpdateRequest("test", "type", "1");
- request.source(XContentFactory.jsonBuilder().startObject().startObject("script").field("inline", "script1").startObject("params")
- .field("param1", "value1").endObject().endObject().endObject());
+ request.fromXContent(XContentFactory.jsonBuilder().startObject()
+ .startObject("script")
+ .field("inline", "script1")
+ .startObject("params")
+ .field("param1", "value1")
+ .endObject()
+ .endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
assertThat(script.getType(), equalTo(ScriptType.INLINE));
- assertThat(script.getLang(), nullValue());
+ assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
params = script.getParams();
assertThat(params, notNullValue());
assertThat(params.size(), equalTo(1));
assertThat(params.get("param1").toString(), equalTo("value1"));
request = new UpdateRequest("test", "type", "1");
- request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
- .endObject().field("inline", "script1").endObject().endObject());
+ request.fromXContent(XContentFactory.jsonBuilder().startObject().startObject("script")
+ .startObject("params").field("param1", "value1").endObject()
+ .field("inline", "script1").endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
assertThat(script.getType(), equalTo(ScriptType.INLINE));
- assertThat(script.getLang(), nullValue());
+ assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
params = script.getParams();
assertThat(params, notNullValue());
assertThat(params.size(), equalTo(1));
@@ -100,14 +106,24 @@ public class UpdateRequestTests extends ESTestCase {
// script with params and upsert
request = new UpdateRequest("test", "type", "1");
- request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
- .endObject().field("inline", "script1").endObject().startObject("upsert").field("field1", "value1").startObject("compound")
- .field("field2", "value2").endObject().endObject().endObject());
+ request.fromXContent(XContentFactory.jsonBuilder().startObject()
+ .startObject("script")
+ .startObject("params")
+ .field("param1", "value1")
+ .endObject()
+ .field("inline", "script1")
+ .endObject()
+ .startObject("upsert")
+ .field("field1", "value1")
+ .startObject("compound")
+ .field("field2", "value2")
+ .endObject()
+ .endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
assertThat(script.getType(), equalTo(ScriptType.INLINE));
- assertThat(script.getLang(), nullValue());
+ assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
params = script.getParams();
assertThat(params, notNullValue());
assertThat(params.size(), equalTo(1));
@@ -117,14 +133,24 @@ public class UpdateRequestTests extends ESTestCase {
assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
request = new UpdateRequest("test", "type", "1");
- request.source(XContentFactory.jsonBuilder().startObject().startObject("upsert").field("field1", "value1").startObject("compound")
- .field("field2", "value2").endObject().endObject().startObject("script").startObject("params").field("param1", "value1")
- .endObject().field("inline", "script1").endObject().endObject());
+ request.fromXContent(XContentFactory.jsonBuilder().startObject()
+ .startObject("upsert")
+ .field("field1", "value1")
+ .startObject("compound")
+ .field("field2", "value2")
+ .endObject()
+ .endObject()
+ .startObject("script")
+ .startObject("params")
+ .field("param1", "value1")
+ .endObject()
+ .field("inline", "script1")
+ .endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
assertThat(script.getType(), equalTo(ScriptType.INLINE));
- assertThat(script.getLang(), nullValue());
+ assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
params = script.getParams();
assertThat(params, notNullValue());
assertThat(params.size(), equalTo(1));
@@ -135,8 +161,9 @@ public class UpdateRequestTests extends ESTestCase {
// script with doc
request = new UpdateRequest("test", "type", "1");
- request.source(XContentFactory.jsonBuilder().startObject().startObject("doc").field("field1", "value1").startObject("compound")
- .field("field2", "value2").endObject().endObject().endObject());
+ request.fromXContent(XContentFactory.jsonBuilder().startObject()
+ .startObject("doc").field("field1", "value1").startObject("compound")
+ .field("field2", "value2").endObject().endObject().endObject());
Map<String, Object> doc = request.doc().sourceAsMap();
assertThat(doc.get("field1").toString(), equalTo("value1"));
assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2"));
@@ -187,7 +214,7 @@ public class UpdateRequestTests extends ESTestCase {
public void testInvalidBodyThrowsParseException() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type", "1");
try {
- request.source(new byte[] { (byte) '"' });
+ request.fromXContent(new byte[] { (byte) '"' });
fail("Should have thrown a ElasticsearchParseException");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("Failed to derive xcontent"));
@@ -197,13 +224,56 @@ public class UpdateRequestTests extends ESTestCase {
// Related to issue 15338
public void testFieldsParsing() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type1", "1")
- .source(new BytesArray("{\"doc\": {\"field1\": \"value1\"}, \"fields\": \"_source\"}"));
+ .fromXContent(new BytesArray("{\"doc\": {\"field1\": \"value1\"}, \"fields\": \"_source\"}"));
assertThat(request.doc().sourceAsMap().get("field1").toString(), equalTo("value1"));
assertThat(request.fields(), arrayContaining("_source"));
request = new UpdateRequest("test", "type2", "2")
- .source(new BytesArray("{\"doc\": {\"field2\": \"value2\"}, \"fields\": [\"field1\", \"field2\"]}"));
+ .fromXContent(new BytesArray("{\"doc\": {\"field2\": \"value2\"}, \"fields\": [\"field1\", \"field2\"]}"));
assertThat(request.doc().sourceAsMap().get("field2").toString(), equalTo("value2"));
assertThat(request.fields(), arrayContaining("field1", "field2"));
}
+
+ public void testFetchSourceParsing() throws Exception {
+ UpdateRequest request = new UpdateRequest("test", "type1", "1");
+ request.fromXContent(
+ XContentFactory.jsonBuilder().startObject().field("_source", true).endObject()
+ );
+ assertThat(request.fetchSource(), notNullValue());
+ assertThat(request.fetchSource().includes().length, equalTo(0));
+ assertThat(request.fetchSource().excludes().length, equalTo(0));
+ assertThat(request.fetchSource().fetchSource(), equalTo(true));
+
+ request.fromXContent(
+ XContentFactory.jsonBuilder().startObject().field("_source", false).endObject()
+ );
+ assertThat(request.fetchSource(), notNullValue());
+ assertThat(request.fetchSource().includes().length, equalTo(0));
+ assertThat(request.fetchSource().excludes().length, equalTo(0));
+ assertThat(request.fetchSource().fetchSource(), equalTo(false));
+
+ request.fromXContent(
+ XContentFactory.jsonBuilder().startObject().field("_source", "path.inner.*").endObject()
+ );
+ assertThat(request.fetchSource(), notNullValue());
+ assertThat(request.fetchSource().fetchSource(), equalTo(true));
+ assertThat(request.fetchSource().includes().length, equalTo(1));
+ assertThat(request.fetchSource().excludes().length, equalTo(0));
+ assertThat(request.fetchSource().includes()[0], equalTo("path.inner.*"));
+
+ request.fromXContent(
+ XContentFactory.jsonBuilder().startObject()
+ .startObject("_source")
+ .field("includes", "path.inner.*")
+ .field("excludes", "another.inner.*")
+ .endObject()
+ .endObject()
+ );
+ assertThat(request.fetchSource(), notNullValue());
+ assertThat(request.fetchSource().fetchSource(), equalTo(true));
+ assertThat(request.fetchSource().includes().length, equalTo(1));
+ assertThat(request.fetchSource().excludes().length, equalTo(1));
+ assertThat(request.fetchSource().includes()[0], equalTo("path.inner.*"));
+ assertThat(request.fetchSource().excludes()[0], equalTo("another.inner.*"));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java
index a92125476e..ebca512bae 100644
--- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java
+++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java
@@ -20,10 +20,8 @@
package org.elasticsearch.aliases;
import org.apache.lucene.search.join.ScoreMode;
-import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
@@ -31,7 +29,6 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.AliasOrIndex;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -52,14 +49,13 @@ import org.elasticsearch.test.ESIntegTestCase;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.client.Requests.indexRequest;
-import static org.elasticsearch.cluster.metadata.AliasAction.Type.ADD;
-import static org.elasticsearch.cluster.metadata.AliasAction.Type.REMOVE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_METADATA_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_ONLY_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA;
@@ -114,25 +110,15 @@ public class IndexAliasesIT extends ESIntegTestCase {
logger.info("--> creating index [test]");
createIndex("test");
- ensureGreen();
-
//invalid filter, invalid json
- IndicesAliasesRequestBuilder indicesAliasesRequestBuilder = admin().indices().prepareAliases().addAlias("test", "alias1", "abcde");
- try {
- indicesAliasesRequestBuilder.get();
- fail("put alias should have been failed due to invalid filter");
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
- }
+ Exception e = expectThrows(IllegalArgumentException.class,
+ () -> admin().indices().prepareAliases().addAlias("test", "alias1", "abcde").get());
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
- //valid json , invalid filter
- indicesAliasesRequestBuilder = admin().indices().prepareAliases().addAlias("test", "alias1", "{ \"test\": {} }");
- try {
- indicesAliasesRequestBuilder.get();
- fail("put alias should have been failed due to invalid filter");
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
- }
+ // valid JSON, invalid filter
+ e = expectThrows(IllegalArgumentException.class,
+ () -> admin().indices().prepareAliases().addAlias("test", "alias1", "{ \"test\": {} }").get());
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
}
public void testFilteringAliases() throws Exception {
@@ -598,7 +584,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
.addAlias("foobar", "foo"));
assertAcked(admin().indices().prepareAliases()
- .addAliasAction(new AliasAction(ADD, "foobar", "bac").routing("bla")));
+ .addAliasAction(AliasActions.add().index("foobar").alias("bac").routing("bla")));
logger.info("--> getting bar and baz for index bazbar");
getResponse = admin().indices().prepareGetAliases("bar", "bac").addIndices("bazbar").get();
@@ -729,224 +715,6 @@ public class IndexAliasesIT extends ESIntegTestCase {
assertThat(existsResponse.exists(), equalTo(false));
}
- public void testAddAliasNullWithoutExistingIndices() {
- try {
- assertAcked(admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, "alias1")));
- fail("create alias should have failed due to null index");
- } catch (IllegalArgumentException e) {
- assertThat("Exception text does not contain \"Alias action [add]: [index/indices] may not be empty string\"",
- e.getMessage(), containsString("Alias action [add]: [index/indices] may not be empty string"));
- }
- }
-
- public void testAddAliasNullWithExistingIndices() throws Exception {
- logger.info("--> creating index [test]");
- createIndex("test");
- ensureGreen();
-
- logger.info("--> aliasing index [null] with [empty-alias]");
-
- try {
- assertAcked(admin().indices().prepareAliases().addAlias((String) null, "empty-alias"));
- fail("create alias should have failed due to null index");
- } catch (IllegalArgumentException e) {
- assertThat("Exception text does not contain \"Alias action [add]: [index/indices] may not be empty string\"",
- e.getMessage(), containsString("Alias action [add]: [index/indices] may not be empty string"));
- }
- }
-
- public void testAddAliasEmptyIndex() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "alias1")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "", "alias1")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
- }
- }
-
- public void testAddAliasNullAlias() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", null)).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "index1", (String)null)).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "index1", (String[])null)).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] is either missing or null"));
- }
- }
-
- public void testAddAliasEmptyAlias() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", "")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "index1", "")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- }
-
- public void testAddAliasNullAliasNullIndex() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, null)).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, null, (String)null)).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- }
-
- public void testAddAliasEmptyAliasEmptyIndex() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "")).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "", "")).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- }
-
- public void testRemoveAliasNullIndex() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, "alias1")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, null, "alias1")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
- }
- }
-
- public void testRemoveAliasEmptyIndex() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("", "alias1")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "", "alias1")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
- }
- }
-
- public void testRemoveAliasNullAlias() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", null)).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "index1", (String)null)).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "index1", (String[])null)).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] is either missing or null"));
- }
- }
-
- public void testRemoveAliasEmptyAlias() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", "")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "index1", "")).get();
- fail("Expected ActionRequestValidationException");
- } catch (ActionRequestValidationException e) {
- assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
- }
- }
-
- public void testRemoveAliasNullAliasNullIndex() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, null)).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, null, (String)null)).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, (String[])null, (String[])null)).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- }
-
- public void testRemoveAliasEmptyAliasEmptyIndex() {
- try {
- admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "")).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- try {
- admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "", "")).get();
- fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
- } catch (ActionRequestValidationException e) {
- assertThat(e.validationErrors(), notNullValue());
- assertThat(e.validationErrors().size(), equalTo(2));
- }
- }
-
public void testGetAllAliasesWorks() {
createIndex("index1");
createIndex("index2");
@@ -1039,7 +807,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
final int numDocs = scaledRandomIntBetween(5, 52);
for (int i = 1; i <= numDocs; i++) {
- client().prepareIndex("my-index", "my-type").setCreate(true).setSource("timestamp", "2016-12-12").get();
+ client().prepareIndex("my-index", "my-type").setSource("timestamp", "2016-12-12").get();
if (i % 2 == 0) {
refresh();
SearchResponse response = client().prepareSearch("filter1").get();
@@ -1113,6 +881,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
}
}
+ public void testRemoveIndexAndReplaceWithAlias() throws InterruptedException, ExecutionException {
+ assertAcked(client().admin().indices().prepareCreate("test"));
+ indexRandom(true, client().prepareIndex("test_2", "test", "test").setSource("test", "test"));
+ assertAcked(client().admin().indices().prepareAliases().addAlias("test_2", "test").removeIndex("test"));
+ assertHitCount(client().prepareSearch("test").get(), 1);
+ }
+
private void checkAliases() {
GetAliasesResponse getAliasesResponse = admin().indices().prepareGetAliases("alias1").get();
assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1));
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
index 1c4cd5b4e8..9813731017 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
@@ -19,11 +19,13 @@
package org.elasticsearch.bootstrap;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -49,7 +51,7 @@ import static org.mockito.Mockito.when;
public class BootstrapCheckTests extends ESTestCase {
- public void testNonProductionMode() {
+ public void testNonProductionMode() throws NodeValidationException {
// nothing should happen since we are in non-production mode
final List<TransportAddress> transportAddresses = new ArrayList<>();
for (int i = 0; i < randomIntBetween(1, 8); i++) {
@@ -66,20 +68,16 @@ public class BootstrapCheckTests extends ESTestCase {
BootstrapCheck.check(Settings.EMPTY, boundTransportAddress);
}
- public void testNoLogMessageInNonProductionMode() {
- final ESLogger logger = mock(ESLogger.class);
- BootstrapCheck.check(false, randomBoolean(), Collections.emptyList(), logger);
+ public void testNoLogMessageInNonProductionMode() throws NodeValidationException {
+ final Logger logger = mock(Logger.class);
+ BootstrapCheck.check(false, Collections.emptyList(), logger);
verifyNoMoreInteractions(logger);
}
- public void testLogMessageInProductionMode() {
- final ESLogger logger = mock(ESLogger.class);
- final boolean ignoreSystemChecks = randomBoolean();
- BootstrapCheck.check(true, ignoreSystemChecks, Collections.emptyList(), logger);
+ public void testLogMessageInProductionMode() throws NodeValidationException {
+ final Logger logger = mock(Logger.class);
+ BootstrapCheck.check(true, Collections.emptyList(), logger);
verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
- if (ignoreSystemChecks) {
- verify(logger).warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
- }
verifyNoMoreInteractions(logger);
}
@@ -137,11 +135,6 @@ public class BootstrapCheckTests extends ESTestCase {
public String errorMessage() {
return "first";
}
-
- @Override
- public boolean isSystemCheck() {
- return false;
- }
},
new BootstrapCheck.Check() {
@Override
@@ -153,16 +146,11 @@ public class BootstrapCheckTests extends ESTestCase {
public String errorMessage() {
return "second";
}
-
- @Override
- public boolean isSystemCheck() {
- return false;
- }
}
);
- final RuntimeException e =
- expectThrows(RuntimeException.class, () -> BootstrapCheck.check(true, false, checks, "testExceptionAggregation"));
+ final NodeValidationException e =
+ expectThrows(NodeValidationException.class, () -> BootstrapCheck.check(true, checks, "testExceptionAggregation"));
assertThat(e, hasToString(allOf(containsString("bootstrap checks failed"), containsString("first"), containsString("second"))));
final Throwable[] suppressed = e.getSuppressed();
assertThat(suppressed.length, equalTo(2));
@@ -172,7 +160,7 @@ public class BootstrapCheckTests extends ESTestCase {
assertThat(suppressed[1], hasToString(containsString("second")));
}
- public void testHeapSizeCheck() {
+ public void testHeapSizeCheck() throws NodeValidationException {
final int initial = randomIntBetween(0, Integer.MAX_VALUE - 1);
final int max = randomIntBetween(initial + 1, Integer.MAX_VALUE);
final AtomicLong initialHeapSize = new AtomicLong(initial);
@@ -190,10 +178,10 @@ public class BootstrapCheckTests extends ESTestCase {
}
};
- final RuntimeException e =
+ final NodeValidationException e =
expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck"));
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck"));
assertThat(
e.getMessage(),
containsString("initial heap size [" + initialHeapSize.get() + "] " +
@@ -201,7 +189,7 @@ public class BootstrapCheckTests extends ESTestCase {
initialHeapSize.set(maxHeapSize.get());
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck");
// nothing should happen if the initial heap size or the max
// heap size is not available
@@ -210,10 +198,10 @@ public class BootstrapCheckTests extends ESTestCase {
} else {
maxHeapSize.set(0);
}
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck");
}
- public void testFileDescriptorLimits() {
+ public void testFileDescriptorLimits() throws NodeValidationException {
final boolean osX = randomBoolean(); // simulates OS X versus non-OS X
final int limit = osX ? 10240 : 1 << 16;
final AtomicLong maxFileDescriptorCount = new AtomicLong(randomIntBetween(1, limit - 1));
@@ -234,19 +222,19 @@ public class BootstrapCheckTests extends ESTestCase {
};
}
- final RuntimeException e =
- expectThrows(RuntimeException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits"));
+ final NodeValidationException e =
+ expectThrows(NodeValidationException.class,
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits"));
assertThat(e.getMessage(), containsString("max file descriptors"));
maxFileDescriptorCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits");
// nothing should happen if current file descriptor count is
// not available
maxFileDescriptorCount.set(-1);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits");
}
public void testFileDescriptorLimitsThrowsOnInvalidLimit() {
@@ -257,7 +245,7 @@ public class BootstrapCheckTests extends ESTestCase {
assertThat(e.getMessage(), containsString("limit must be positive but was"));
}
- public void testMlockallCheck() {
+ public void testMlockallCheck() throws NodeValidationException {
class MlockallCheckTestCase {
private final boolean mlockallSet;
@@ -287,11 +275,10 @@ public class BootstrapCheckTests extends ESTestCase {
};
if (testCase.shouldFail) {
- final RuntimeException e = expectThrows(
- RuntimeException.class,
+ final NodeValidationException e = expectThrows(
+ NodeValidationException.class,
() -> BootstrapCheck.check(
true,
- false,
Collections.singletonList(check),
"testFileDescriptorLimitsThrowsOnInvalidLimit"));
assertThat(
@@ -299,12 +286,12 @@ public class BootstrapCheckTests extends ESTestCase {
containsString("memory locking requested for elasticsearch process but memory is not locked"));
} else {
// nothing should happen
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit");
}
}
}
- public void testMaxNumberOfThreadsCheck() {
+ public void testMaxNumberOfThreadsCheck() throws NodeValidationException {
final int limit = 1 << 11;
final AtomicLong maxNumberOfThreads = new AtomicLong(randomIntBetween(1, limit - 1));
final BootstrapCheck.MaxNumberOfThreadsCheck check = new BootstrapCheck.MaxNumberOfThreadsCheck() {
@@ -314,22 +301,22 @@ public class BootstrapCheckTests extends ESTestCase {
}
};
- final RuntimeException e = expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"));
+ final NodeValidationException e = expectThrows(
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"));
assertThat(e.getMessage(), containsString("max number of threads"));
maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
// nothing should happen if current max number of threads is
// not available
maxNumberOfThreads.set(-1);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
}
- public void testMaxSizeVirtualMemory() {
+ public void testMaxSizeVirtualMemory() throws NodeValidationException {
final long rlimInfinity = Constants.MAC_OS_X ? 9223372036854775807L : -1L;
final AtomicLong maxSizeVirtualMemory = new AtomicLong(randomIntBetween(0, Integer.MAX_VALUE));
final BootstrapCheck.MaxSizeVirtualMemoryCheck check = new BootstrapCheck.MaxSizeVirtualMemoryCheck() {
@@ -345,22 +332,22 @@ public class BootstrapCheckTests extends ESTestCase {
};
- final RuntimeException e = expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory"));
+ final NodeValidationException e = expectThrows(
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory"));
assertThat(e.getMessage(), containsString("max size virtual memory"));
maxSizeVirtualMemory.set(rlimInfinity);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory");
// nothing should happen if max size virtual memory is not
// available
maxSizeVirtualMemory.set(Long.MIN_VALUE);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory");
}
- public void testMaxMapCountCheck() {
+ public void testMaxMapCountCheck() throws NodeValidationException {
final int limit = 1 << 18;
final AtomicLong maxMapCount = new AtomicLong(randomIntBetween(1, limit - 1));
final BootstrapCheck.MaxMapCountCheck check = new BootstrapCheck.MaxMapCountCheck() {
@@ -370,31 +357,22 @@ public class BootstrapCheckTests extends ESTestCase {
}
};
- RuntimeException e = expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck"));
+ final NodeValidationException e = expectThrows(
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck"));
assertThat(e.getMessage(), containsString("max virtual memory areas vm.max_map_count"));
maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck");
// nothing should happen if current vm.max_map_count is not
// available
maxMapCount.set(-1);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck");
- }
-
- public void testMinMasterNodes() {
- boolean isSet = randomBoolean();
- BootstrapCheck.Check check = new BootstrapCheck.MinMasterNodesCheck(isSet);
- assertThat(check.check(), not(equalTo(isSet)));
- List<BootstrapCheck.Check> defaultChecks = BootstrapCheck.checks(Settings.EMPTY);
-
- expectThrows(RuntimeException.class, () -> BootstrapCheck.check(true, false, defaultChecks, "testMinMasterNodes"));
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck");
}
- public void testClientJvmCheck() {
+ public void testClientJvmCheck() throws NodeValidationException {
final AtomicReference<String> vmName = new AtomicReference<>("Java HotSpot(TM) 32-Bit Client VM");
final BootstrapCheck.Check check = new BootstrapCheck.ClientJvmCheck() {
@Override
@@ -403,19 +381,40 @@ public class BootstrapCheckTests extends ESTestCase {
}
};
- final RuntimeException e = expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testClientJvmCheck"));
+ final NodeValidationException e = expectThrows(
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testClientJvmCheck"));
assertThat(
e.getMessage(),
containsString("JVM is using the client VM [Java HotSpot(TM) 32-Bit Client VM] " +
"but should be using a server VM for the best performance"));
vmName.set("Java HotSpot(TM) 32-Bit Server VM");
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testClientJvmCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testClientJvmCheck");
}
- public void testMightForkCheck() {
+ public void testUseSerialGCCheck() throws NodeValidationException {
+ final AtomicReference<String> useSerialGC = new AtomicReference<>("true");
+ final BootstrapCheck.Check check = new BootstrapCheck.UseSerialGCCheck() {
+ @Override
+ String getUseSerialGC() {
+ return useSerialGC.get();
+ }
+ };
+
+ final NodeValidationException e = expectThrows(
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testUseSerialGCCheck"));
+ assertThat(
+ e.getMessage(),
+            containsString("JVM is using the serial collector but should not be for the best performance; " +
+                "either it's the default for the VM [" + JvmInfo.jvmInfo().getVmName() + "] or -XX:+UseSerialGC was explicitly specified"));
+
+ useSerialGC.set("false");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testUseSerialGCCheck");
+ }
+
+ public void testMightForkCheck() throws NodeValidationException {
final AtomicBoolean isSeccompInstalled = new AtomicBoolean();
final AtomicBoolean mightFork = new AtomicBoolean();
final BootstrapCheck.MightForkCheck check = new BootstrapCheck.MightForkCheck() {
@@ -443,7 +442,7 @@ public class BootstrapCheckTests extends ESTestCase {
e -> assertThat(e.getMessage(), containsString("error")));
}
- public void testOnErrorCheck() {
+ public void testOnErrorCheck() throws NodeValidationException {
final AtomicBoolean isSeccompInstalled = new AtomicBoolean();
final AtomicReference<String> onError = new AtomicReference<>();
final BootstrapCheck.MightForkCheck check = new BootstrapCheck.OnErrorCheck() {
@@ -471,7 +470,7 @@ public class BootstrapCheckTests extends ESTestCase {
+ " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError")));
}
- public void testOnOutOfMemoryErrorCheck() {
+ public void testOnOutOfMemoryErrorCheck() throws NodeValidationException {
final AtomicBoolean isSeccompInstalled = new AtomicBoolean();
final AtomicReference<String> onOutOfMemoryError = new AtomicReference<>();
final BootstrapCheck.MightForkCheck check = new BootstrapCheck.OnOutOfMemoryErrorCheck() {
@@ -505,7 +504,7 @@ public class BootstrapCheckTests extends ESTestCase {
final AtomicBoolean isSeccompInstalled,
final Runnable disableMightFork,
final Runnable enableMightFork,
- final Consumer<RuntimeException> consumer) {
+ final Consumer<NodeValidationException> consumer) throws NodeValidationException {
final String methodName = Thread.currentThread().getStackTrace()[2].getMethodName();
@@ -516,13 +515,13 @@ public class BootstrapCheckTests extends ESTestCase {
} else {
enableMightFork.run();
}
- BootstrapCheck.check(true, randomBoolean(), Collections.singletonList(check), methodName);
+ BootstrapCheck.check(true, Collections.singletonList(check), methodName);
// if seccomp is enabled, but we will not fork, nothing should
// happen
isSeccompInstalled.set(true);
disableMightFork.run();
- BootstrapCheck.check(true, randomBoolean(), Collections.singletonList(check), methodName);
+ BootstrapCheck.check(true, Collections.singletonList(check), methodName);
// if seccomp is enabled, and we might fork, the check should
// be enforced, regardless of bootstrap checks being enabled or
@@ -530,51 +529,12 @@ public class BootstrapCheckTests extends ESTestCase {
isSeccompInstalled.set(true);
enableMightFork.run();
- final RuntimeException e = expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(randomBoolean(), randomBoolean(), Collections.singletonList(check), methodName));
+ final NodeValidationException e = expectThrows(
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(randomBoolean(), Collections.singletonList(check), methodName));
consumer.accept(e);
}
- public void testIgnoringSystemChecks() {
- final BootstrapCheck.Check check = new BootstrapCheck.Check() {
- @Override
- public boolean check() {
- return true;
- }
-
- @Override
- public String errorMessage() {
- return "error";
- }
-
- @Override
- public boolean isSystemCheck() {
- return true;
- }
- };
-
- final RuntimeException notIgnored = expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testIgnoringSystemChecks"));
- assertThat(notIgnored, hasToString(containsString("error")));
-
- final ESLogger logger = mock(ESLogger.class);
-
- // nothing should happen if we ignore system checks
- BootstrapCheck.check(true, true, Collections.singletonList(check), logger);
- verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
- verify(logger).warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
- verify(logger).warn("error");
- verifyNoMoreInteractions(logger);
- reset(logger);
-
- // nothing should happen if we ignore all checks
- BootstrapCheck.check(false, randomBoolean(), Collections.singletonList(check), logger);
- verify(logger).warn("error");
- verifyNoMoreInteractions(logger);
- }
-
public void testAlwaysEnforcedChecks() {
final BootstrapCheck.Check check = new BootstrapCheck.Check() {
@Override
@@ -588,19 +548,14 @@ public class BootstrapCheckTests extends ESTestCase {
}
@Override
- public boolean isSystemCheck() {
- return randomBoolean();
- }
-
- @Override
public boolean alwaysEnforce() {
return true;
}
};
- final RuntimeException alwaysEnforced = expectThrows(
- RuntimeException.class,
- () -> BootstrapCheck.check(randomBoolean(), randomBoolean(), Collections.singletonList(check), "testAlwaysEnforcedChecks"));
+ final NodeValidationException alwaysEnforced = expectThrows(
+ NodeValidationException.class,
+ () -> BootstrapCheck.check(randomBoolean(), Collections.singletonList(check), "testAlwaysEnforcedChecks"));
assertThat(alwaysEnforced, hasToString(containsString("error")));
}
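Taken together, the BootstrapCheckTests hunks above capture the reshaped API: the ignoreSystemChecks flag and Check.isSystemCheck() are gone, and failures now surface as the checked NodeValidationException, with per-check messages attached as suppressed exceptions. A minimal sketch (not part of the patch) of that call shape, using only signatures visible in this diff; the first boolean is the enforce-checks flag the tests drive, and everything else here is illustrative:

package org.elasticsearch.bootstrap; // the tests invoke the package-private check() from this package

import java.util.Collections;
import org.elasticsearch.node.NodeValidationException;

class BootstrapCheckUsageSketch {
    static void demo() {
        final BootstrapCheck.Check failing = new BootstrapCheck.Check() {
            @Override
            public boolean check() {
                return true; // true means the check tripped
            }

            @Override
            public String errorMessage() {
                return "example failure";
            }
        };
        try {
            BootstrapCheck.check(true, Collections.singletonList(failing), "demo");
        } catch (final NodeValidationException e) {
            // as testExceptionAggregation shows, individual failures arrive as suppressed exceptions
            for (final Throwable suppressed : e.getSuppressed()) {
                System.err.println(suppressed.getMessage());
            }
        }
    }
}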
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java
index f8bdf24499..9a1417bdfa 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java
@@ -43,6 +43,9 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--daemonize");
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "-p", "/tmp/pid");
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--pidfile", "/tmp/pid");
+ runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "-q");
+ runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--quiet");
+
runTestThatVersionIsReturned("-V");
runTestThatVersionIsReturned("--version");
}
@@ -66,7 +69,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
}
private void runTestVersion(int expectedStatus, Consumer<String> outputConsumer, String... args) throws Exception {
- runTest(expectedStatus, false, outputConsumer, (foreground, pidFile, esSettings) -> {}, args);
+ runTest(expectedStatus, false, outputConsumer, (foreground, pidFile, quiet, esSettings) -> {}, args);
}
public void testPositionalArgs() throws Exception {
@@ -74,21 +77,21 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")),
- (foreground, pidFile, esSettings) -> {},
+ (foreground, pidFile, quiet, esSettings) -> {},
"foo"
);
runTest(
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Positional arguments not allowed, found [foo, bar]")),
- (foreground, pidFile, esSettings) -> {},
+ (foreground, pidFile, quiet, esSettings) -> {},
"foo", "bar"
);
runTest(
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")),
- (foreground, pidFile, esSettings) -> {},
+ (foreground, pidFile, quiet, esSettings) -> {},
"-E", "foo=bar", "foo", "-E", "baz=qux"
);
}
@@ -109,7 +112,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
expectedStatus,
expectedInit,
outputConsumer,
- (foreground, pidFile, esSettings) -> assertThat(pidFile.toString(), equalTo(expectedPidFile.toString())),
+ (foreground, pidFile, quiet, esSettings) -> assertThat(pidFile.toString(), equalTo(expectedPidFile.toString())),
args);
}
@@ -124,7 +127,22 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.OK,
true,
output -> {},
- (foreground, pidFile, esSettings) -> assertThat(foreground, equalTo(!expectedDaemonize)),
+ (foreground, pidFile, quiet, esSettings) -> assertThat(foreground, equalTo(!expectedDaemonize)),
+ args);
+ }
+
+ public void testThatParsingQuietOptionWorks() throws Exception {
+ runQuietTest(true, "-q");
+ runQuietTest(true, "--quiet");
+ runQuietTest(false);
+ }
+
+ private void runQuietTest(final boolean expectedQuiet, final String... args) throws Exception {
+ runTest(
+ ExitCodes.OK,
+ true,
+ output -> {},
+ (foreground, pidFile, quiet, esSettings) -> assertThat(quiet, equalTo(expectedQuiet)),
args);
}
@@ -133,7 +151,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.OK,
true,
output -> {},
- (foreground, pidFile, esSettings) -> {
+ (foreground, pidFile, quiet, esSettings) -> {
assertThat(esSettings.size(), equalTo(2));
assertThat(esSettings, hasEntry("foo", "bar"));
assertThat(esSettings, hasEntry("baz", "qux"));
@@ -147,7 +165,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Setting [foo] must not be empty")),
- (foreground, pidFile, esSettings) -> {},
+ (foreground, pidFile, quiet, esSettings) -> {},
"-E", "foo="
);
}
@@ -157,7 +175,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("network.host is not a recognized option")),
- (foreground, pidFile, esSettings) -> {},
+ (foreground, pidFile, quiet, esSettings) -> {},
"--network.host");
}
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java
index bb3ef29176..55d5e65580 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java
@@ -111,15 +111,15 @@ public class JarHellTests extends ESTestCase {
}
}
- public void testLog4jLeniency() throws Exception {
+ public void testLog4jThrowableProxyLeniency() throws Exception {
Path dir = createTempDir();
- URL[] jars = {makeJar(dir, "foo.jar", null, "org/apache/log4j/DuplicateClass.class"), makeJar(dir, "bar.jar", null, "org/apache/log4j/DuplicateClass.class")};
+ URL[] jars = {makeJar(dir, "foo.jar", null, "org.apache.logging.log4j.core.impl.ThrowableProxy.class"), makeJar(dir, "bar.jar", null, "org.apache.logging.log4j.core.impl.ThrowableProxy.class")};
JarHell.checkJarHell(jars);
}
- public void testBaseDateTimeLeniency() throws Exception {
+ public void testLog4jServerLeniency() throws Exception {
Path dir = createTempDir();
- URL[] jars = {makeJar(dir, "foo.jar", null, "org/joda/time/base/BaseDateTime.class"), makeJar(dir, "bar.jar", null, "org/joda/time/base/BaseDateTime.class")};
+ URL[] jars = {makeJar(dir, "foo.jar", null, "org.apache.logging.log4j.core.jmx.Server.class"), makeJar(dir, "bar.jar", null, "org.apache.logging.log4j.core.jmx.Server.class")};
JarHell.checkJarHell(jars);
}
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
index b48fcc78c6..b3862f5af1 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java
@@ -19,20 +19,25 @@
package org.elasticsearch.bootstrap;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.MockLogAppender;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.function.Predicate;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
@@ -47,8 +52,7 @@ public class MaxMapCountCheckTests extends ESTestCase {
}
}
- @SuppressLoggerChecks(reason = "mock usage")
- public void testGetMaxMapCount() throws IOException {
+ public void testGetMaxMapCount() throws IOException, IllegalAccessException {
final long procSysVmMaxMapCount = randomIntBetween(1, Integer.MAX_VALUE);
final BufferedReader reader = mock(BufferedReader.class);
when(reader.readLine()).thenReturn(Long.toString(procSysVmMaxMapCount));
@@ -64,20 +68,92 @@ public class MaxMapCountCheckTests extends ESTestCase {
assertThat(check.getMaxMapCount(), equalTo(procSysVmMaxMapCount));
verify(reader).close();
- reset(reader);
- final IOException ioException = new IOException("fatal");
- when(reader.readLine()).thenThrow(ioException);
- final ESLogger logger = mock(ESLogger.class);
- assertThat(check.getMaxMapCount(logger), equalTo(-1L));
- verify(logger).warn("I/O exception while trying to read [{}]", ioException, procSysVmMaxMapCountPath);
- verify(reader).close();
+ {
+ reset(reader);
+ final IOException ioException = new IOException("fatal");
+ when(reader.readLine()).thenThrow(ioException);
+ final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountIOException");
+ final MockLogAppender appender = new MockLogAppender();
+ appender.addExpectation(
+ new ParameterizedMessageLoggingExpectation(
+ "expected logged I/O exception",
+ "testGetMaxMapCountIOException",
+ Level.WARN,
+ "I/O exception while trying to read [{}]",
+ new Object[] { procSysVmMaxMapCountPath },
+ e -> ioException == e));
+ Loggers.addAppender(logger, appender);
+ assertThat(check.getMaxMapCount(logger), equalTo(-1L));
+ appender.assertAllExpectationsMatched();
+ verify(reader).close();
+ Loggers.removeAppender(logger, appender);
+ }
+
+ {
+ reset(reader);
+ when(reader.readLine()).thenReturn("eof");
+ final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountNumberFormatException");
+ final MockLogAppender appender = new MockLogAppender();
+ appender.addExpectation(
+ new ParameterizedMessageLoggingExpectation(
+ "expected logged number format exception",
+ "testGetMaxMapCountNumberFormatException",
+ Level.WARN,
+ "unable to parse vm.max_map_count [{}]",
+ new Object[] { "eof" },
+ e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"")));
+ Loggers.addAppender(logger, appender);
+ assertThat(check.getMaxMapCount(logger), equalTo(-1L));
+ appender.assertAllExpectationsMatched();
+ verify(reader).close();
+ Loggers.removeAppender(logger, appender);
+ }
+
+ }
+
+ private static class ParameterizedMessageLoggingExpectation implements MockLogAppender.LoggingExpectation {
+
+ private boolean saw = false;
+
+ private final String name;
+ private final String loggerName;
+ private final Level level;
+ private final String messagePattern;
+ private final Object[] arguments;
+ private final Predicate<Throwable> throwablePredicate;
+
+ private ParameterizedMessageLoggingExpectation(
+ final String name,
+ final String loggerName,
+ final Level level,
+ final String messagePattern,
+ final Object[] arguments,
+ final Predicate<Throwable> throwablePredicate) {
+ this.name = name;
+ this.loggerName = loggerName;
+ this.level = level;
+ this.messagePattern = messagePattern;
+ this.arguments = arguments;
+ this.throwablePredicate = throwablePredicate;
+ }
+
+ @Override
+ public void match(LogEvent event) {
+ if (event.getLevel().equals(level) &&
+ event.getLoggerName().equals(loggerName) &&
+ event.getMessage() instanceof ParameterizedMessage) {
+ final ParameterizedMessage message = (ParameterizedMessage)event.getMessage();
+ saw = message.getFormat().equals(messagePattern) &&
+ Arrays.deepEquals(arguments, message.getParameters()) &&
+ throwablePredicate.test(event.getThrown());
+ }
+ }
+
+ @Override
+ public void assertMatched() {
+ assertTrue(name, saw);
+ }
- reset(reader);
- reset(logger);
- when(reader.readLine()).thenReturn("eof");
- assertThat(check.getMaxMapCount(logger), equalTo(-1L));
- verify(logger).warn(eq("unable to parse vm.max_map_count [{}]"), any(NumberFormatException.class), eq("eof"));
- verify(reader).close();
}
public void testMaxMapCountCheckRead() throws IOException {
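ParameterizedMessageLoggingExpectation above replaces mock-based ESLogger verification with assertions over real Log4j 2 events. The matching reduces to comparing the event's level, logger name, format string, parameters, and thrown Throwable; a self-contained sketch of that logic using only the Log4j 2 API:

import java.util.Arrays;
import java.util.function.Predicate;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.message.ParameterizedMessage;

final class LogEventMatcher {
    static boolean matches(final LogEvent event, final Level level, final String loggerName,
                           final String pattern, final Object[] arguments, final Predicate<Throwable> thrown) {
        if (!event.getLevel().equals(level)
                || !event.getLoggerName().equals(loggerName)
                || !(event.getMessage() instanceof ParameterizedMessage)) {
            return false;
        }
        final ParameterizedMessage message = (ParameterizedMessage) event.getMessage();
        // compare the raw "{}" pattern and its arguments, not the rendered string
        return message.getFormat().equals(pattern)
                && Arrays.deepEquals(arguments, message.getParameters())
                && thrown.test(event.getThrown());
    }
}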
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
index 45c89062a5..e4e7927872 100644
--- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
+++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
@@ -24,15 +24,18 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.segments.IndexSegments;
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@@ -47,6 +50,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.mapper.StringFieldMapperPositionIncrementGapTests;
import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
@@ -103,8 +107,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
@Before
public void initIndexesList() throws Exception {
- indexes = OldIndexUtils.loadIndexesList("index", getBwcIndicesPath());
- unsupportedIndexes = OldIndexUtils.loadIndexesList("unsupported", getBwcIndicesPath());
+ indexes = OldIndexUtils.loadDataFilesList("index", getBwcIndicesPath());
+ unsupportedIndexes = OldIndexUtils.loadDataFilesList("unsupported", getBwcIndicesPath());
}
@AfterClass
@@ -249,15 +253,43 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
Version actualVersionCreated = Version.indexCreated(getIndexResponse.getSettings().get(indexName));
assertEquals(indexCreated, actualVersionCreated);
ensureYellow(indexName);
+ RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(indexName)
+ .setDetailed(true).setActiveOnly(false).get();
+ boolean foundTranslog = false;
+ for (List<RecoveryState> states : recoveryResponse.shardRecoveryStates().values()) {
+ for (RecoveryState state : states) {
+ if (state.getStage() == RecoveryState.Stage.DONE
+ && state.getPrimary()
+ && state.getRecoverySource().getType() == RecoverySource.Type.EXISTING_STORE) {
+                        assertFalse("more than one primary recovered?", foundTranslog);
+ assertNotEquals(0, state.getTranslog().recoveredOperations());
+ foundTranslog = true;
+ }
+ }
+ }
+ assertTrue("expected translog but nothing was recovered", foundTranslog);
IndicesSegmentResponse segmentsResponse = client().admin().indices().prepareSegments(indexName).get();
IndexSegments segments = segmentsResponse.getIndices().get(indexName);
+ int numCurrent = 0;
+ int numBWC = 0;
for (IndexShardSegments indexShardSegments : segments) {
for (ShardSegments shardSegments : indexShardSegments) {
for (Segment segment : shardSegments) {
- assertEquals(indexCreated.luceneVersion, segment.version);
+ if (indexCreated.luceneVersion.equals(segment.version)) {
+ numBWC++;
+ if (Version.CURRENT.luceneVersion.equals(segment.version)) {
+ numCurrent++;
+ }
+ } else if (Version.CURRENT.luceneVersion.equals(segment.version)) {
+ numCurrent++;
+ } else {
+ fail("unexpected version " + segment.version);
+ }
}
}
}
+ assertNotEquals("expected at least 1 current segment after translog recovery", 0, numCurrent);
+ assertNotEquals("expected at least 1 old segment", 0, numBWC);
SearchResponse test = client().prepareSearch(indexName).get();
assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
}
@@ -279,6 +311,14 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
searchRsp = searchReq.get();
ElasticsearchAssertions.assertNoFailures(searchRsp);
assertEquals(numDocs, searchRsp.getHits().getTotalHits());
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(indexName).get();
+ Version versionCreated = Version.fromId(Integer.parseInt(getSettingsResponse.getSetting(indexName, "index.version.created")));
+ if (versionCreated.onOrAfter(Version.V_2_4_0)) {
+ searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.existsQuery("field.with.dots"));
+ searchRsp = searchReq.get();
+ ElasticsearchAssertions.assertNoFailures(searchRsp);
+ assertEquals(numDocs, searchRsp.getHits().getTotalHits());
+ }
}
boolean findPayloadBoostInExplanation(Explanation expl) {
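The new assertions in OldIndexBackwardsCompatibilityIT walk the detailed recovery states looking for exactly one completed existing-store primary recovery that replayed translog operations. A sketch of that scan, reusing the client calls and type names from the hunk; the wrapping method is illustrative:

import java.util.List;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.indices.recovery.RecoveryState;

final class TranslogRecoveryScan {
    static int recoveredOps(final Client client, final String indexName) {
        final RecoveryResponse response = client.admin().indices()
            .prepareRecoveries(indexName).setDetailed(true).setActiveOnly(false).get();
        for (final List<RecoveryState> states : response.shardRecoveryStates().values()) {
            for (final RecoveryState state : states) {
                if (state.getStage() == RecoveryState.Stage.DONE
                        && state.getPrimary()
                        && state.getRecoverySource().getType() == RecoverySource.Type.EXISTING_STORE) {
                    return state.getTranslog().recoveredOperations();
                }
            }
        }
        return -1; // no completed existing-store primary recovery found
    }
}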
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java
index 9bfcc55499..d7ed0d8db5 100644
--- a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java
+++ b/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java
@@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
// this test sometimes fails in recovery when the recovery is reset, increasing the logging level to help debug
-@TestLogging("indices.recovery:DEBUG")
+@TestLogging("org.elasticsearch.indices.recovery:DEBUG")
public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase {
/**
diff --git a/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java
index 276a43581a..a82f964c01 100644
--- a/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java
+++ b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java
@@ -73,8 +73,9 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
protected ThreadPool threadPool;
private Client client;
- @Before
- public void initClient() {
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
Settings settings = Settings.builder()
.put(HEADER_SETTINGS)
.put("path.home", createTempDir().toString())
@@ -85,8 +86,10 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
client = buildClient(settings, ACTIONS);
}
- @After
- public void cleanupClient() throws Exception {
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
client.close();
terminate(threadPool);
}
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java
index e736e4b86a..02240a6bf2 100644
--- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java
@@ -31,48 +31,57 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.MockTransportClient;
-import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
-import org.elasticsearch.transport.TransportService;
import java.util.Collections;
+import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
-/**
- *
- */
public class TransportClientHeadersTests extends AbstractClientHeadersTestCase {
- private static final LocalTransportAddress address = new LocalTransportAddress("test");
+ private MockTransportService transportService;
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ transportService.stop();
+ transportService.close();
+ }
@Override
protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) {
+ transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
+ transportService.start();
+ transportService.acceptIncomingRequests();
TransportClient client = new MockTransportClient(Settings.builder()
.put("client.transport.sniff", false)
.put("cluster.name", "cluster1")
.put("node.name", "transport_client_" + this.getTestName())
.put(headersSettings)
- .build(), InternalTransportService.TestPlugin.class);
-
- client.addTransportAddress(address);
+ .build(), InternalTransportServiceInterceptor.TestPlugin.class);
+ InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class)
+ .filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get();
+ plugin.instance.threadPool = client.threadPool();
+ plugin.instance.address = transportService.boundAddress().publishAddress();
+ client.addTransportAddress(transportService.boundAddress().publishAddress());
return client;
}
@@ -85,72 +94,78 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase {
.put("client.transport.nodes_sampler_interval", "1s")
.put(HEADER_SETTINGS)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(),
- InternalTransportService.TestPlugin.class)) {
- client.addTransportAddress(address);
-
- InternalTransportService service = (InternalTransportService) client.injector.getInstance(TransportService.class);
-
- if (!service.clusterStateLatch.await(5, TimeUnit.SECONDS)) {
+ InternalTransportServiceInterceptor.TestPlugin.class)) {
+ InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class)
+ .filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get();
+ plugin.instance.threadPool = client.threadPool();
+ plugin.instance.address = transportService.boundAddress().publishAddress();
+ client.addTransportAddress(transportService.boundAddress().publishAddress());
+
+ if (!plugin.instance.clusterStateLatch.await(5, TimeUnit.SECONDS)) {
fail("takes way too long to get the cluster state");
}
assertThat(client.connectedNodes().size(), is(1));
- assertThat(client.connectedNodes().get(0).getAddress(), is((TransportAddress) address));
+ assertThat(client.connectedNodes().get(0).getAddress(), is(transportService.boundAddress().publishAddress()));
}
}
- public static class InternalTransportService extends TransportService {
+ public static class InternalTransportServiceInterceptor implements TransportInterceptor {
- public static class TestPlugin extends Plugin {
- public void onModule(NetworkModule transportModule) {
- transportModule.registerTransportService("internal", InternalTransportService.class);
- }
- @Override
- public Settings additionalSettings() {
- return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build();
- }
- }
+ ThreadPool threadPool;
+ TransportAddress address;
- CountDownLatch clusterStateLatch = new CountDownLatch(1);
- @Inject
- public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
- super(settings, transport, threadPool);
- }
+ public static class TestPlugin extends Plugin implements NetworkPlugin {
+ private InternalTransportServiceInterceptor instance = new InternalTransportServiceInterceptor();
- @Override @SuppressWarnings("unchecked")
- public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
- TransportRequestOptions options, TransportResponseHandler<T> handler) {
- if (TransportLivenessAction.NAME.equals(action)) {
- assertHeaders(threadPool);
- ((TransportResponseHandler<LivenessResponse>) handler).handleResponse(new LivenessResponse(clusterName, node));
- return;
- }
- if (ClusterStateAction.NAME.equals(action)) {
- assertHeaders(threadPool);
- ClusterName cluster1 = new ClusterName("cluster1");
- ClusterState.Builder builder = ClusterState.builder(cluster1);
- //the sniffer detects only data nodes
- builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(),
- Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)));
- ((TransportResponseHandler<ClusterStateResponse>) handler)
- .handleResponse(new ClusterStateResponse(cluster1, builder.build()));
- clusterStateLatch.countDown();
- return;
+ @Override
+ public List<TransportInterceptor> getTransportInterceptors() {
+ return Collections.singletonList(new TransportInterceptor() {
+ @Override
+ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action,
+ TransportRequestHandler<T> actualHandler) {
+ return instance.interceptHandler(action, actualHandler);
+ }
+
+ @Override
+ public AsyncSender interceptSender(AsyncSender sender) {
+ return instance.interceptSender(sender);
+ }
+ });
}
-
- handler.handleException(new TransportException("", new InternalException(action)));
}
- @Override
- public boolean nodeConnected(DiscoveryNode node) {
- assertThat(node.getAddress(), equalTo(address));
- return true;
- }
+ final CountDownLatch clusterStateLatch = new CountDownLatch(1);
@Override
- public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
- assertThat(node.getAddress(), equalTo(address));
+ public AsyncSender interceptSender(AsyncSender sender) {
+ return new AsyncSender() {
+ @Override
+ public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
+ TransportRequestOptions options, TransportResponseHandler<T> handler) {
+ if (TransportLivenessAction.NAME.equals(action)) {
+ assertHeaders(threadPool);
+ ((TransportResponseHandler<LivenessResponse>) handler).handleResponse(
+ new LivenessResponse(new ClusterName("cluster1"), node));
+ return;
+ }
+ if (ClusterStateAction.NAME.equals(action)) {
+ assertHeaders(threadPool);
+ ClusterName cluster1 = new ClusterName("cluster1");
+ ClusterState.Builder builder = ClusterState.builder(cluster1);
+ //the sniffer detects only data nodes
+ builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(),
+ Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)));
+ ((TransportResponseHandler<ClusterStateResponse>) handler)
+ .handleResponse(new ClusterStateResponse(cluster1, builder.build()));
+ clusterStateLatch.countDown();
+ return;
+ }
+
+ handler.handleException(new TransportException("", new InternalException(action)));
+ }
+ };
}
}
}
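Rather than subclassing TransportService, the rewritten TestPlugin implements NetworkPlugin and contributes a TransportInterceptor. A stripped-down sketch of that registration, overriding only interceptSender as the hunks above do (interceptHandler keeps its default); the pass-through body is illustrative:

import java.util.Collections;
import java.util.List;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.TransportInterceptor;

public class InterceptingPlugin extends Plugin implements NetworkPlugin {
    @Override
    public List<TransportInterceptor> getTransportInterceptors() {
        return Collections.singletonList(new TransportInterceptor() {
            @Override
            public AsyncSender interceptSender(final AsyncSender sender) {
                return sender; // a real interceptor returns a wrapper around sender
            }
        });
    }
}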
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java
index 9b5b764b88..3f4fd50117 100644
--- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java
@@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
@@ -51,7 +52,7 @@ public class TransportClientIT extends ESIntegTestCase {
}
- public void testNodeVersionIsUpdated() throws IOException {
+ public void testNodeVersionIsUpdated() throws IOException, NodeValidationException {
TransportClient client = (TransportClient) internalCluster().client();
try (Node node = new Node(Settings.builder()
.put(internalCluster().getDefaultSettings())
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
index 41891c5831..1596519651 100644
--- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
@@ -29,12 +29,12 @@ import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
-import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
@@ -76,28 +76,22 @@ public class TransportClientNodesServiceTests extends ESTestCase {
return new TestResponse();
}
};
- transportService = new TransportService(settings, transport, threadPool) {
- @Override
- public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action,
- TransportRequest request, final TransportResponseHandler<T> handler) {
- if (TransportLivenessAction.NAME.equals(action)) {
- super.sendRequest(node, action, request, wrapLivenessResponseHandler(handler, node, clusterName));
- } else {
- super.sendRequest(node, action, request, handler);
- }
- }
-
+ transportService = new TransportService(settings, transport, threadPool, new TransportInterceptor() {
@Override
- public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
- TransportRequestOptions options,
- TransportResponseHandler<T> handler) {
- if (TransportLivenessAction.NAME.equals(action)) {
- super.sendRequest(node, action, request, options, wrapLivenessResponseHandler(handler, node, clusterName));
- } else {
- super.sendRequest(node, action, request, options, handler);
- }
+ public AsyncSender interceptSender(AsyncSender sender) {
+ return new AsyncSender() {
+ @Override
+ public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
+ TransportRequestOptions options, TransportResponseHandler<T> handler) {
+ if (TransportLivenessAction.NAME.equals(action)) {
+ sender.sendRequest(node, action, request, options, wrapLivenessResponseHandler(handler, node, clusterName));
+ } else {
+ sender.sendRequest(node, action, request, options, handler);
+ }
+ }
+ };
}
- };
+ });
transportService.start();
transportService.acceptIncomingRequests();
transportClientNodesService =
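TransportClientNodesServiceTests makes the same move at the TransportService constructor: instead of overriding sendRequest, it hands in a TransportInterceptor whose interceptSender wraps the real AsyncSender and delegates. A sketch of that wrapping idiom; the logging line stands in for the liveness-handler rewrite the test performs:

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;

final class LoggingInterceptor implements TransportInterceptor {
    @Override
    public AsyncSender interceptSender(final AsyncSender sender) {
        return new AsyncSender() {
            @Override
            public <T extends TransportResponse> void sendRequest(
                    final DiscoveryNode node, final String action, final TransportRequest request,
                    final TransportRequestOptions options, final TransportResponseHandler<T> handler) {
                System.out.println("sending [" + action + "] to [" + node + "]"); // observe...
                sender.sendRequest(node, action, request, options, handler);      // ...then delegate
            }
        };
    }
}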
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoTests.java
index 99afee8b2c..1fcab355d6 100644
--- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoTests.java
@@ -18,15 +18,12 @@
*/
package org.elasticsearch.cluster;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.snapshots.Snapshot;
-import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.test.ESTestCase;
public class ClusterInfoTests extends ESTestCase {
@@ -74,12 +71,8 @@ public class ClusterInfoTests extends ESTestCase {
int numEntries = randomIntBetween(0, 128);
ImmutableOpenMap.Builder<ShardRouting, String> builder = ImmutableOpenMap.builder(numEntries);
for (int i = 0; i < numEntries; i++) {
- RestoreSource restoreSource = new RestoreSource(new Snapshot(randomAsciiOfLength(4),
- new SnapshotId(randomAsciiOfLength(4), randomAsciiOfLength(4))), Version.CURRENT, randomAsciiOfLength(4));
- UnassignedInfo.Reason reason = randomFrom(UnassignedInfo.Reason.values());
- UnassignedInfo unassignedInfo = new UnassignedInfo(reason, randomAsciiOfLength(4));
ShardId shardId = new ShardId(randomAsciiOfLength(32), randomAsciiOfLength(32), randomIntBetween(0, Integer.MAX_VALUE));
- ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, restoreSource, randomBoolean(), unassignedInfo);
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, randomBoolean(), ShardRoutingState.UNASSIGNED);
builder.put(shardRouting, randomAsciiOfLength(32));
}
return builder.build();
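ClusterInfoTests now builds unassigned shards through TestShardRouting.newShardRouting, which hides the RecoverySource/UnassignedInfo plumbing the old ShardRouting.newUnassigned call required. A sketch of the simplified construction; the index UUID "_na_" is a placeholder assumption:

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

final class RoutingFixture {
    static ShardRouting unassignedPrimary(final String indexName) {
        final ShardId shardId = new ShardId(new Index(indexName, "_na_"), 0);
        // currentNodeId is null because the shard is still unassigned
        return TestShardRouting.newShardRouting(shardId, null, true, ShardRoutingState.UNASSIGNED);
    }
}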
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
index 98404a2275..5c710ec92d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.ModuleTestCase;
@@ -37,13 +38,18 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.plugins.ClusterPlugin;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.function.Supplier;
+
public class ClusterModuleTests extends ModuleTestCase {
private ClusterService clusterService = new ClusterService(Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null);
- public static class FakeAllocationDecider extends AllocationDecider {
+ static class FakeAllocationDecider extends AllocationDecider {
protected FakeAllocationDecider(Settings settings) {
super(settings);
}
@@ -61,13 +67,6 @@ public class ClusterModuleTests extends ModuleTestCase {
}
}
- static class FakeIndexTemplateFilter implements IndexTemplateFilter {
- @Override
- public boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template) {
- return false;
- }
- }
-
public void testRegisterClusterDynamicSettingDuplicate() {
try {
new SettingsModule(Settings.EMPTY, EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING);
@@ -99,64 +98,62 @@ public class ClusterModuleTests extends ModuleTestCase {
}
public void testRegisterAllocationDeciderDuplicate() {
- ClusterModule module = new ClusterModule(Settings.EMPTY, clusterService);
- try {
- module.registerAllocationDecider(EnableAllocationDecider.class);
- } catch (IllegalArgumentException e) {
- assertEquals(e.getMessage(),
- "Can't register the same [allocation_decider] more than once for [" + EnableAllocationDecider.class.getName() + "]");
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+ new ClusterModule(Settings.EMPTY, clusterService,
+ Collections.<ClusterPlugin>singletonList(new ClusterPlugin() {
+ @Override
+ public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
+ return Collections.singletonList(new EnableAllocationDecider(settings, clusterSettings));
+ }
+ })));
+ assertEquals(e.getMessage(),
+ "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice");
}
public void testRegisterAllocationDecider() {
- ClusterModule module = new ClusterModule(Settings.EMPTY, clusterService);
- module.registerAllocationDecider(FakeAllocationDecider.class);
- assertSetMultiBinding(module, AllocationDecider.class, FakeAllocationDecider.class);
+ ClusterModule module = new ClusterModule(Settings.EMPTY, clusterService,
+ Collections.singletonList(new ClusterPlugin() {
+ @Override
+ public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
+ return Collections.singletonList(new FakeAllocationDecider(settings));
+ }
+ }));
+ assertTrue(module.allocationDeciders.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class)));
+ }
+
+ private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, String name, Supplier<ShardsAllocator> supplier) {
+ return new ClusterModule(settings, clusterService, Collections.singletonList(
+ new ClusterPlugin() {
+ @Override
+ public Map<String, Supplier<ShardsAllocator>> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) {
+ return Collections.singletonMap(name, supplier);
+ }
+ }
+ ));
}
public void testRegisterShardsAllocator() {
Settings settings = Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "custom").build();
- ClusterModule module = new ClusterModule(settings, clusterService);
- module.registerShardsAllocator("custom", FakeShardsAllocator.class);
- assertBinding(module, ShardsAllocator.class, FakeShardsAllocator.class);
+ ClusterModule module = newClusterModuleWithShardsAllocator(settings, "custom", FakeShardsAllocator::new);
+ assertEquals(FakeShardsAllocator.class, module.shardsAllocator.getClass());
}
public void testRegisterShardsAllocatorAlreadyRegistered() {
- ClusterModule module = new ClusterModule(Settings.EMPTY, clusterService);
- try {
- module.registerShardsAllocator(ClusterModule.BALANCED_ALLOCATOR, FakeShardsAllocator.class);
- } catch (IllegalArgumentException e) {
- assertEquals(e.getMessage(), "Can't register the same [shards_allocator] more than once for [balanced]");
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+ newClusterModuleWithShardsAllocator(Settings.EMPTY, ClusterModule.BALANCED_ALLOCATOR, FakeShardsAllocator::new));
+ assertEquals("ShardsAllocator [" + ClusterModule.BALANCED_ALLOCATOR + "] already defined", e.getMessage());
}
public void testUnknownShardsAllocator() {
Settings settings = Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "dne").build();
- ClusterModule module = new ClusterModule(settings, clusterService);
- assertBindingFailure(module, "Unknown [shards_allocator]");
- }
-
- public void testEvenShardsAllocatorBackcompat() {
- Settings settings = Settings.builder()
- .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR).build();
- ClusterModule module = new ClusterModule(settings, clusterService);
- assertBinding(module, ShardsAllocator.class, BalancedShardsAllocator.class);
- }
-
- public void testRegisterIndexTemplateFilterDuplicate() {
- ClusterModule module = new ClusterModule(Settings.EMPTY, clusterService);
- try {
- module.registerIndexTemplateFilter(FakeIndexTemplateFilter.class);
- module.registerIndexTemplateFilter(FakeIndexTemplateFilter.class);
- } catch (IllegalArgumentException e) {
- assertEquals(e.getMessage(),
- "Can't register the same [index_template_filter] more than once for [" + FakeIndexTemplateFilter.class.getName() + "]");
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+ new ClusterModule(settings, clusterService, Collections.emptyList()));
+ assertEquals("Unknown ShardsAllocator [dne]", e.getMessage());
}
- public void testRegisterIndexTemplateFilter() {
- ClusterModule module = new ClusterModule(Settings.EMPTY, clusterService);
- module.registerIndexTemplateFilter(FakeIndexTemplateFilter.class);
- assertSetMultiBinding(module, IndexTemplateFilter.class, FakeIndexTemplateFilter.class);
+ public void testShardsAllocatorFactoryNull() {
+ Settings settings = Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "bad").build();
+ NullPointerException e = expectThrows(NullPointerException.class, () ->
+ newClusterModuleWithShardsAllocator(settings, "bad", () -> null));
}
}
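ClusterModuleTests now constructs ClusterModule with a list of ClusterPlugins instead of calling register methods on the module. A sketch of the plugin-side extension point, assuming only the createAllocationDeciders signature shown above; getShardsAllocators works analogously, returning a Map<String, Supplier<ShardsAllocator>> keyed by the cluster.routing.allocation.type value:

import java.util.Collection;
import java.util.Collections;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.plugins.Plugin;

public class CustomAllocationPlugin extends Plugin implements ClusterPlugin {
    @Override
    public Collection<AllocationDecider> createAllocationDeciders(final Settings settings,
                                                                  final ClusterSettings clusterSettings) {
        // deciders are deduplicated by class: contributing the same one twice fails, per the test above
        return Collections.singletonList(new AllocationDecider(settings) {
            // empty body: inherits the default (permit-everything) decisions
        });
    }
}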
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
index 1a7a2093a5..db59e78587 100644
--- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
@@ -245,7 +245,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
unassignedInfo = new UnassignedInfo(randomReason(), randomAsciiOfLength(10));
}
indexShard.addShard(
- TestShardRouting.newShardRouting(index, i, randomFrom(nodeIds), null, null, j == 0,
+ TestShardRouting.newShardRouting(index, i, randomFrom(nodeIds), null, j == 0,
ShardRoutingState.fromValue((byte) randomIntBetween(2, 3)), unassignedInfo));
}
builder.addIndexShard(indexShard.build());
diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
index 2073235af9..b9e07a9207 100644
--- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.cluster.routing.UnassignedInfo;
@@ -101,13 +102,13 @@ public class DiskUsageTests extends ESTestCase {
public void testFillShardLevelInfo() {
final Index index = new Index("test", "0xdeadbeef");
- ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_0 = ShardRoutingHelper.initialize(test_0, "node1");
test_0 = ShardRoutingHelper.moveToStarted(test_0);
Path test0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0");
CommonStats commonStats0 = new CommonStats();
commonStats0.store = new StoreStats(100, 1);
- ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_1 = ShardRoutingHelper.initialize(test_1, "node2");
test_1 = ShardRoutingHelper.moveToStarted(test_1);
Path test1Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("1");
@@ -136,14 +137,14 @@ public class DiskUsageTests extends ESTestCase {
public void testFillShardsWithShadowIndices() {
final Index index = new Index("non-shadow", "0xcafe0000");
- ShardRouting s0 = ShardRouting.newUnassigned(new ShardId(index, 0), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting s0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
s0 = ShardRoutingHelper.initialize(s0, "node1");
s0 = ShardRoutingHelper.moveToStarted(s0);
Path i0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0");
CommonStats commonStats0 = new CommonStats();
commonStats0.store = new StoreStats(100, 1);
final Index index2 = new Index("shadow", "0xcafe0001");
- ShardRouting s1 = ShardRouting.newUnassigned(new ShardId(index2, 0), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting s1 = ShardRouting.newUnassigned(new ShardId(index2, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
s1 = ShardRoutingHelper.initialize(s1, "node2");
s1 = ShardRoutingHelper.moveToStarted(s1);
Path i1Path = createTempDir().resolve("indices").resolve(index2.getUUID()).resolve("0");
@@ -267,8 +268,8 @@ public class DiskUsageTests extends ESTestCase {
assertNotNull(usage);
assertNotNull(path);
assertEquals(usage.toString(), usage.getPath(), path.getPath());
- assertEquals(usage.toString(), usage.getTotalBytes(), path.getTotal().bytes());
- assertEquals(usage.toString(), usage.getFreeBytes(), path.getAvailable().bytes());
+ assertEquals(usage.toString(), usage.getTotalBytes(), path.getTotal().getBytes());
+ assertEquals(usage.toString(), usage.getFreeBytes(), path.getAvailable().getBytes());
}
}
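The DiskUsageTests hunks track two API changes: ShardRouting.newUnassigned drops its nullable source-node argument in favor of an explicit RecoverySource, and the byte-size accessors move from bytes() to getBytes(). A sketch of the new construction path, assuming this branch's elasticsearch jars on the classpath (the wrapper class and method name are invented for illustration):

    import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.ShardRoutingHelper;
    import org.elasticsearch.cluster.routing.UnassignedInfo;
    import org.elasticsearch.index.Index;
    import org.elasticsearch.index.shard.ShardId;

    class StartedShardSketch {
        static ShardRouting startedReplica(Index index, int shardNum, String nodeId) {
            // primary == false, and the recovery source is now stated explicitly
            ShardRouting shard = ShardRouting.newUnassigned(new ShardId(index, shardNum), false,
                PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test"));
            shard = ShardRoutingHelper.initialize(shard, nodeId); // assign to a node
            return ShardRoutingHelper.moveToStarted(shard);       // mark active
        }
    }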
diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java
index 33141107b2..2e86cb5b89 100644
--- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java
@@ -29,9 +29,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
-import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -65,7 +64,7 @@ import static org.hamcrest.Matchers.nullValue;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
@ESIntegTestCase.SuppressLocalMode
-@TestLogging("_root:DEBUG,cluster.service:TRACE,discovery.zen:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE")
public class MinimumMasterNodesIT extends ESIntegTestCase {
@Override
@@ -364,7 +363,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
public void testCanNotPublishWithoutMinMastNodes() throws Exception {
Settings settings = Settings.builder()
.put("discovery.type", "zen")
- .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1h") // disable it
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up
@@ -379,7 +377,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
new TwoPartitions(Collections.singleton(master), otherNodes),
new NetworkDelay(TimeValue.timeValueMinutes(1)));
internalCluster().setDisruptionScheme(partition);
- partition.startDisrupting();
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Exception> failure = new AtomicReference<>();
@@ -393,6 +390,8 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
+ logger.debug("--> starting the disruption, preventing cluster state publishing");
+ partition.startDisrupting();
MetaData.Builder metaData = MetaData.builder(currentState.metaData()).persistentSettings(
Settings.builder().put(currentState.metaData().persistentSettings()).put("_SHOULD_NOT_BE_THERE_", true).build()
);
diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
index ed7a20dc87..5bf2bc38c3 100644
--- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java
@@ -149,7 +149,7 @@ public class NodeConnectionsServiceTests extends ESTestCase {
public void setUp() throws Exception {
super.setUp();
this.transport = new MockTransport();
- transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL);
+ transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
}
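This setUp() change repeats in ShardStateActionTests and ClusterStateHealthTests below: the TransportService constructor gained a TransportInterceptor parameter, and tests with nothing to intercept pass the no-op constant. Isolated as a sketch (wrapper class invented; assumes the test-framework classpath):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.Transport;
    import org.elasticsearch.transport.TransportService;

    class TransportServiceSetupSketch {
        static TransportService create(Transport transport, ThreadPool threadPool) {
            TransportService transportService = new TransportService(Settings.EMPTY, transport, threadPool,
                TransportService.NOOP_TRANSPORT_INTERCEPTOR); // no interception in these tests
            transportService.start();
            transportService.acceptIncomingRequests();
            return transportService;
        }
    }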
diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java
index 1f98f2cdc9..00edd53625 100644
--- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java
@@ -25,6 +25,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -36,14 +37,13 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
+import org.elasticsearch.cluster.routing.allocation.StaleShard;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before;
import java.util.ArrayList;
@@ -56,7 +56,6 @@ import java.util.stream.IntStream;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.CoreMatchers.not;
public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase {
@@ -117,7 +116,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase
List<ShardStateAction.ShardEntry> nonExistentTasks = createNonExistentShards(currentState, reason);
ShardStateAction.ShardFailedClusterStateTaskExecutor failingExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger) {
@Override
- RoutingAllocation.Result applyFailedShards(ClusterState currentState, List<FailedRerouteAllocation.FailedShard> failedShards) {
+ ClusterState applyFailedShards(ClusterState currentState, List<FailedShard> failedShards, List<StaleShard> staleShards) {
throw new RuntimeException("simulated applyFailedShards failure");
}
};
@@ -161,9 +160,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase
allocationService.reroute(stateAfterAddingNode, reason).routingTable();
ClusterState stateAfterReroute = ClusterState.builder(stateAfterAddingNode).routingTable(afterReroute).build();
RoutingNodes routingNodes = stateAfterReroute.getRoutingNodes();
- RoutingTable afterStart =
- allocationService.applyStartedShards(stateAfterReroute, routingNodes.shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
- return ClusterState.builder(stateAfterReroute).routingTable(afterStart).build();
+ return allocationService.applyStartedShards(stateAfterReroute, routingNodes.shardsWithState(ShardRoutingState.INITIALIZING));
}
private List<ShardStateAction.ShardEntry> createExistingShards(ClusterState currentState, String reason) {
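The applyStartedShards hunk reflects a broader AllocationService change that shows up again in DelayedAllocationServiceTests below: allocation calls now return the resulting ClusterState directly instead of a RoutingAllocation.Result whose routing table the caller had to fold back into a rebuilt state. A sketch of the new shape (wrapper class invented for illustration):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.routing.ShardRoutingState;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;

    class StartShardsSketch {
        static ClusterState startInitializingShards(AllocationService allocationService, ClusterState state) {
            // one call, one new state; no ClusterState.builder(...).routingTable(...) rebuild
            return allocationService.applyStartedShards(state,
                state.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING));
        }
    }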
diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java
index 762e7d9e75..e042fadca9 100644
--- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java
@@ -106,7 +106,7 @@ public class ShardStateActionTests extends ESTestCase {
super.setUp();
this.transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
- transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
+ transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null);
@@ -339,7 +339,8 @@ public class ShardStateActionTests extends ESTestCase {
long primaryTerm = clusterService.state().metaData().index(index).primaryTerm(failedShard.id());
assertThat(primaryTerm, greaterThanOrEqualTo(1L));
- shardStateAction.remoteShardFailed(failedShard, primaryTerm + 1, "test", getSimulatedFailure(), new ShardStateAction.Listener() {
+ shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), primaryTerm + 1, "test",
+ getSimulatedFailure(), new ShardStateAction.Listener() {
@Override
public void onSuccess() {
failure.set(null);
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
index 94863dc4f5..31e841227b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.allocation;
import com.carrotsearch.hppc.ObjectIntHashMap;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -27,11 +28,10 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.equalTo;
@ClusterScope(scope= ESIntegTestCase.Scope.TEST, numDataNodes =0, minNumDataNodes = 2)
public class AwarenessAllocationIT extends ESIntegTestCase {
- private final ESLogger logger = Loggers.getLogger(AwarenessAllocationIT.class);
+ private final Logger logger = Loggers.getLogger(AwarenessAllocationIT.class);
@Override
protected int numberOfReplicas() {
@@ -79,7 +79,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
assertThat(awaitBusy(
() -> {
logger.info("--> waiting for no relocation");
- ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForRelocatingShards(0).get();
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get();
if (clusterHealth.isTimedOut()) {
return false;
}
@@ -131,7 +131,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
.put("index.number_of_replicas", 1)).execute().actionGet();
logger.info("--> waiting for shards to be allocated");
- health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).execute().actionGet();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@@ -166,7 +166,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
client().admin().indices().prepareCreate("test")
.setSettings(Settings.builder().put("index.number_of_shards", 5)
.put("index.number_of_replicas", 1)).execute().actionGet();
- ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForRelocatingShards(0).execute().actionGet();
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
@@ -186,7 +186,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
client().admin().cluster().prepareReroute().get();
- health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@@ -208,7 +208,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
client().admin().cluster().prepareReroute().get();
- health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@@ -229,7 +229,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
assertThat(counts.containsKey(noZoneNode), equalTo(false));
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
- health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
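Every health call in this file swaps setWaitForRelocatingShards(0) for setWaitForNoRelocatingShards(true): the magic zero becomes an explicit boolean. The request in isolation, as a sketch (the helper and its class are invented; assumes an integration-test Client):

    import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.Priority;

    class HealthWaitSketch {
        static boolean greenAndSettled(Client client, String nodeCount) {
            ClusterHealthResponse health = client.admin().cluster().prepareHealth()
                .setWaitForEvents(Priority.LANGUID)
                .setWaitForGreenStatus()
                .setWaitForNodes(nodeCount)
                .setWaitForNoRelocatingShards(true) // was: setWaitForRelocatingShards(0)
                .get();
            return health.isTimedOut() == false;
        }
    }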
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
index 70af580824..22d02f5146 100644
--- a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.allocation;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
@@ -39,7 +40,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDeci
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@@ -69,7 +69,7 @@ import static org.hamcrest.Matchers.hasSize;
*/
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class ClusterRerouteIT extends ESIntegTestCase {
- private final ESLogger logger = Loggers.getLogger(ClusterRerouteIT.class);
+ private final Logger logger = Loggers.getLogger(ClusterRerouteIT.class);
public void testRerouteWithCommands_disableAllocationSettings() throws Exception {
Settings commonSettings = Settings.builder()
@@ -138,7 +138,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
- healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));
logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
@@ -335,7 +335,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
assertAcked(client().admin().cluster().prepareReroute()
.add(new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))));
- ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));
} finally {
disableIndexBlock("test-blocks", blockSetting);
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
index 4cc0408642..627fc03701 100644
--- a/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
@@ -19,12 +19,12 @@
package org.elasticsearch.cluster.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
@@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo;
@ClusterScope(scope= Scope.TEST, numDataNodes =0)
public class FilteringAllocationIT extends ESIntegTestCase {
- private final ESLogger logger = Loggers.getLogger(FilteringAllocationIT.class);
+ private final Logger logger = Loggers.getLogger(FilteringAllocationIT.class);
public void testDecommissionNodeNoReplicas() throws Exception {
logger.info("--> starting 2 nodes");
diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
index 0b0ea6b3f9..d80e16397a 100644
--- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.RoutingTableGenerator;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -47,7 +48,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
@@ -91,7 +92,8 @@ public class ClusterStateHealthTests extends ESTestCase {
public void setUp() throws Exception {
super.setUp();
clusterService = createClusterService(threadPool);
- transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool);
+ transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
}
@@ -137,7 +139,7 @@ public class ClusterStateHealthTests extends ESTestCase {
listenerCalled.await();
TransportClusterHealthAction action = new TransportClusterHealthAction(Settings.EMPTY, transportService,
- clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, NoopGatewayAllocator.INSTANCE);
+ clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, new TestGatewayAllocator());
PlainActionFuture<ClusterHealthResponse> listener = new PlainActionFuture<>();
action.execute(new ClusterHealthRequest(), listener);
@@ -275,9 +277,9 @@ public class ClusterStateHealthTests extends ESTestCase {
// if the inactive primaries are due solely to recovery (not failed allocation or previously being allocated)
// then cluster health is YELLOW, otherwise RED
if (primaryInactiveDueToRecovery(indexName, clusterState)) {
- assertThat(health.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat("clusterState is:\n" + clusterState.prettyPrint(), health.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
} else {
- assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));
+ assertThat("clusterState is:\n" + clusterState.prettyPrint(), health.getStatus(), equalTo(ClusterHealthStatus.RED));
}
}
}
@@ -330,7 +332,7 @@ public class ClusterStateHealthTests extends ESTestCase {
boolean atLeastOne = false;
for (int i = 0; i < numberOfShards; i++) {
if (atLeastOne == false || randomBoolean()) {
- idxMetaWithAllocationIds.putActiveAllocationIds(i, Sets.newHashSet(UUIDs.randomBase64UUID()));
+ idxMetaWithAllocationIds.putInSyncAllocationIds(i, Sets.newHashSet(UUIDs.randomBase64UUID()));
atLeastOne = true;
}
}
@@ -400,7 +402,7 @@ public class ClusterStateHealthTests extends ESTestCase {
routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
IndexMetaData.Builder idxMetaBuilder = IndexMetaData.builder(clusterState.metaData().index(indexName));
for (final IntObjectCursor<Set<String>> entry : allocationIds.build()) {
- idxMetaBuilder.putActiveAllocationIds(entry.key, entry.value);
+ idxMetaBuilder.putInSyncAllocationIds(entry.key, entry.value);
}
MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.metaData()).put(idxMetaBuilder);
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaDataBuilder).build();
@@ -447,7 +449,7 @@ public class ClusterStateHealthTests extends ESTestCase {
routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
idxMetaBuilder = IndexMetaData.builder(clusterState.metaData().index(indexName));
for (final IntObjectCursor<Set<String>> entry : allocationIds.build()) {
- idxMetaBuilder.putActiveAllocationIds(entry.key, entry.value);
+ idxMetaBuilder.putInSyncAllocationIds(entry.key, entry.value);
}
metaDataBuilder = MetaData.builder(clusterState.metaData()).put(idxMetaBuilder);
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaDataBuilder).build();
@@ -518,16 +520,22 @@ public class ClusterStateHealthTests extends ESTestCase {
}
// returns true if the inactive primaries in the index are only due to cluster recovery
- // (not because of allocation failure or previously having allocation ids assigned)
+ // (not because of allocation of existing shard or previously having allocation ids assigned)
private boolean primaryInactiveDueToRecovery(final String indexName, final ClusterState clusterState) {
for (final IntObjectCursor<IndexShardRoutingTable> shardRouting : clusterState.routingTable().index(indexName).shards()) {
final ShardRouting primaryShard = shardRouting.value.primaryShard();
if (primaryShard.active() == false) {
- if (clusterState.metaData().index(indexName).activeAllocationIds(shardRouting.key).isEmpty() == false) {
+ if (clusterState.metaData().index(indexName).inSyncAllocationIds(shardRouting.key).isEmpty() == false) {
+ return false;
+ }
+ if (primaryShard.recoverySource() != null &&
+ primaryShard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE) {
+ return false;
+ }
+ if (primaryShard.unassignedInfo().getNumFailedAllocations() > 0) {
return false;
}
- if (primaryShard.unassignedInfo() != null &&
- primaryShard.unassignedInfo().getReason() == UnassignedInfo.Reason.ALLOCATION_FAILED) {
+ if (primaryShard.unassignedInfo().getLastAllocationStatus() == UnassignedInfo.AllocationStatus.DECIDERS_NO) {
return false;
}
}
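Two renames run through ClusterStateHealthTests: putActiveAllocationIds becomes putInSyncAllocationIds on the index-metadata builder, and the health check consults the shard's RecoverySource instead of a nullable UnassignedInfo reason. The builder side, sketched (wrapper class invented):

    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.common.UUIDs;
    import org.elasticsearch.common.util.set.Sets;

    class InSyncIdsSketch {
        static void markOneInSync(IndexMetaData.Builder builder, int shardId) {
            // "active" allocation ids are now tracked as "in-sync" ids
            builder.putInSyncAllocationIds(shardId, Sets.newHashSet(UUIDs.randomBase64UUID()));
        }
    }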
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
index f1f20511fc..d74b450f5b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
@@ -39,7 +39,7 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.Arrays;
import java.util.Collections;
@@ -133,7 +133,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@@ -161,7 +161,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@@ -191,7 +191,9 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
validateIndexName("index#name", "must not contain '#'");
- validateIndexName("_indexname", "must not start with '_'");
+ validateIndexName("_indexname", "must not start with '_', '-', or '+'");
+ validateIndexName("-indexname", "must not start with '_', '-', or '+'");
+ validateIndexName("+indexname", "must not start with '_', '-', or '+'");
validateIndexName("INDEXNAME", "must be lowercase");
@@ -201,7 +203,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
private void validateIndexName(String indexName, String errorMessage) {
InvalidIndexNameException e = expectThrows(InvalidIndexNameException.class,
- () -> getCreateIndexService().validateIndexName(indexName, ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING
+ () -> MetaDataCreateIndexService.validateIndexName(indexName, ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING
.getDefault(Settings.EMPTY)).build()));
assertThat(e.getMessage(), endsWith(errorMessage));
}
@@ -213,7 +215,6 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
null,
null,
null,
- new HashSet<>(),
null,
null,
null,
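Besides widening the rejected leading characters from '_' alone to '_', '-', and '+', the hunk turns validateIndexName into a static call, so the test no longer needs a service instance. A sketch of the new call shape (wrapper class invented):

    import org.elasticsearch.cluster.ClusterName;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
    import org.elasticsearch.common.settings.Settings;

    class IndexNameSketch {
        static void check(String name) {
            ClusterState state = ClusterState.builder(
                ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
            // throws InvalidIndexNameException for names like "_x", "-x", "+x", or "UPPER"
            MetaDataCreateIndexService.validateIndexName(name, state);
        }
    }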
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java
new file mode 100644
index 0000000000..a48f3ae3e1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.SnapshotsInProgress;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.repositories.IndexId;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
+
+import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+
+public class MetaDataDeleteIndexServiceTests extends ESTestCase {
+ private final AllocationService allocationService = mock(AllocationService.class);
+ private final MetaDataDeleteIndexService service = new MetaDataDeleteIndexService(Settings.EMPTY, null, allocationService);
+
+ public void testDeleteMissing() {
+ Index index = new Index("missing", "doesn't matter");
+ ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build();
+ IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> service.deleteIndices(state, singleton(index)));
+ assertEquals(index, e.getIndex());
+ }
+
+ public void testDeleteSnapshotting() {
+ String index = randomAsciiOfLength(5);
+ Snapshot snapshot = new Snapshot("doesn't matter", new SnapshotId("snapshot name", "snapshot uuid"));
+ SnapshotsInProgress snaps = new SnapshotsInProgress(new SnapshotsInProgress.Entry(snapshot, true, false,
+ SnapshotsInProgress.State.INIT, singletonList(new IndexId(index, "doesn't matter")),
+ System.currentTimeMillis(), ImmutableOpenMap.of()));
+ ClusterState state = ClusterState.builder(clusterState(index))
+ .putCustom(SnapshotsInProgress.TYPE, snaps)
+ .build();
+ Exception e = expectThrows(IllegalArgumentException.class,
+ () -> service.deleteIndices(state, singleton(state.metaData().getIndices().get(index).getIndex())));
+ assertEquals("Cannot delete indices that are being snapshotted: [[" + index + "]]. Try again after snapshot finishes "
+ + "or cancel the currently running snapshot.", e.getMessage());
+ }
+
+ public void testDeleteUnassigned() {
+ // Create an unassigned index
+ String index = randomAsciiOfLength(5);
+ ClusterState before = clusterState(index);
+
+ // Mock the built reroute
+ when(allocationService.reroute(any(ClusterState.class), any(String.class))).then(i -> i.getArguments()[0]);
+
+ // Remove it
+ ClusterState after = service.deleteIndices(before, singleton(before.metaData().getIndices().get(index).getIndex()));
+
+ // It is gone
+ assertNull(after.metaData().getIndices().get(index));
+ assertNull(after.routingTable().index(index));
+ assertNull(after.blocks().indices().get(index));
+
+ // Make sure we actually attempted to reroute
+ verify(allocationService).reroute(any(ClusterState.class), any(String.class));
+ }
+
+ private ClusterState clusterState(String index) {
+ IndexMetaData indexMetaData = IndexMetaData.builder(index)
+ .settings(Settings.builder().put("index.version.created", VersionUtils.randomVersion(random())))
+ .numberOfShards(1)
+ .numberOfReplicas(1)
+ .build();
+ return ClusterState.builder(ClusterName.DEFAULT)
+ .metaData(MetaData.builder().put(indexMetaData, false))
+ .routingTable(RoutingTable.builder().addAsNew(indexMetaData).build())
+ .blocks(ClusterBlocks.builder().addBlocks(indexMetaData))
+ .build();
+ }
+}
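The new test keeps AllocationService out of scope by stubbing reroute as an identity function: Mockito's answer returns the first argument, and a later verify() confirms the deletion actually triggered a reroute. The idiom, isolated (wrapper class invented):

    import static org.mockito.Matchers.any;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;

    class RerouteMockSketch {
        static AllocationService identityRerouteMock() {
            AllocationService allocationService = mock(AllocationService.class);
            when(allocationService.reroute(any(ClusterState.class), any(String.class)))
                .then(invocation -> invocation.getArguments()[0]); // pass the state through unchanged
            return allocationService;
        }
    }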
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
new file mode 100644
index 0000000000..03f62830ba
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import static java.util.Collections.singletonList;
+import static org.hamcrest.Matchers.contains;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyCollectionOf;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class MetaDataIndexAliasesServiceTests extends ESTestCase {
+ private final AliasValidator aliasValidator = new AliasValidator(Settings.EMPTY);
+ private final MetaDataDeleteIndexService deleteIndexService = mock(MetaDataDeleteIndexService.class);
+ private final MetaDataIndexAliasesService service = new MetaDataIndexAliasesService(Settings.EMPTY, null, null, aliasValidator,
+ null, deleteIndexService);
+
+ public MetaDataIndexAliasesServiceTests() {
+ // Mock any deletes so we don't need to worry about how MetaDataDeleteIndexService does its job
+ when(deleteIndexService.deleteIndices(any(ClusterState.class), anyCollectionOf(Index.class))).then(i -> {
+ ClusterState state = (ClusterState) i.getArguments()[0];
+ @SuppressWarnings("unchecked")
+ Collection<Index> indices = (Collection<Index>) i.getArguments()[1];
+ MetaData.Builder meta = MetaData.builder(state.metaData());
+ for (Index index : indices) {
+ assertTrue("index now found", state.metaData().hasConcreteIndex(index.getName()));
+ meta.remove(index.getName()); // We only think about metadata for this test. Not routing or any other fun stuff.
+ }
+ return ClusterState.builder(state).metaData(meta).build();
+ });
+ }
+
+ public void testAddAndRemove() {
+ // Create a state with a single index
+ String index = randomAsciiOfLength(5);
+ ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), index);
+
+ // Add an alias to it
+ ClusterState after = service.innerExecute(before, singletonList(new AliasAction.Add(index, "test", null, null, null)));
+ AliasOrIndex alias = after.metaData().getAliasAndIndexLookup().get("test");
+ assertNotNull(alias);
+ assertTrue(alias.isAlias());
+ assertThat(alias.getIndices(), contains(after.metaData().index(index)));
+
+ // Remove the alias from it while adding another one
+ before = after;
+ after = service.innerExecute(before, Arrays.asList(
+ new AliasAction.Remove(index, "test"),
+ new AliasAction.Add(index, "test_2", null, null, null)));
+ assertNull(after.metaData().getAliasAndIndexLookup().get("test"));
+ alias = after.metaData().getAliasAndIndexLookup().get("test_2");
+ assertNotNull(alias);
+ assertTrue(alias.isAlias());
+ assertThat(alias.getIndices(), contains(after.metaData().index(index)));
+
+ // Now just remove on its own
+ before = after;
+ after = service.innerExecute(before, singletonList(new AliasAction.Remove(index, "test_2")));
+ assertNull(after.metaData().getAliasAndIndexLookup().get("test"));
+ assertNull(after.metaData().getAliasAndIndexLookup().get("test_2"));
+ }
+
+ public void testSwapIndexWithAlias() {
+ // Create "test" and "test_2"
+ ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), "test");
+ before = createIndex(before, "test_2");
+
+ // Now remove "test" and add an alias to "test" to "test_2" in one go
+ ClusterState after = service.innerExecute(before, Arrays.asList(
+ new AliasAction.Add("test_2", "test", null, null, null),
+ new AliasAction.RemoveIndex("test")));
+ AliasOrIndex alias = after.metaData().getAliasAndIndexLookup().get("test");
+ assertNotNull(alias);
+ assertTrue(alias.isAlias());
+ assertThat(alias.getIndices(), contains(after.metaData().index("test_2")));
+ }
+
+ public void testAddAliasToRemovedIndex() {
+ // Create "test"
+ ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), "test");
+
+ // Attempt to add an alias to "test" at the same time as we remove it
+ IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> service.innerExecute(before, Arrays.asList(
+ new AliasAction.Add("test", "alias", null, null, null),
+ new AliasAction.RemoveIndex("test"))));
+ assertEquals("test", e.getIndex().getName());
+ }
+
+ public void testRemoveIndexTwice() {
+ // Create "test"
+ ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), "test");
+
+ // Try to remove an index twice. This should just remove the index once....
+ ClusterState after = service.innerExecute(before, Arrays.asList(
+ new AliasAction.RemoveIndex("test"),
+ new AliasAction.RemoveIndex("test")));
+ assertNull(after.metaData().getAliasAndIndexLookup().get("test"));
+ }
+
+ private ClusterState createIndex(ClusterState state, String index) {
+ IndexMetaData indexMetaData = IndexMetaData.builder(index)
+ .settings(Settings.builder().put("index.version.created", VersionUtils.randomVersion(random())))
+ .numberOfShards(1)
+ .numberOfReplicas(1)
+ .build();
+ return ClusterState.builder(state)
+ .metaData(MetaData.builder(state.metaData()).put(indexMetaData, false))
+ .build();
+ }
+}
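testSwapIndexWithAlias pins down the interesting property here: innerExecute applies the whole batch against one state, so adding the alias and removing the index land atomically and readers never see "test" resolve to nothing. The swap, extracted as a sketch (wrapper class invented):

    import java.util.Arrays;

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.AliasAction;
    import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;

    class AliasSwapSketch {
        static ClusterState swap(MetaDataIndexAliasesService service, ClusterState before) {
            return service.innerExecute(before, Arrays.asList(
                new AliasAction.Add("test_2", "test", null, null, null), // alias "test" -> index "test_2"
                new AliasAction.RemoveIndex("test")));                   // drop the old index in the same step
        }
    }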
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java
index 52c52242c0..376feb305a 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java
@@ -56,7 +56,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
}
public void testUpgrade() {
- MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+ MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(),
+ Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
src = service.upgradeIndexMetaData(src);
@@ -67,7 +68,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
}
public void testIsUpgraded() {
- MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+ MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(),
+ Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion());
@@ -77,6 +79,26 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
assertTrue(service.isUpgraded(src));
}
+ public void testFailUpgrade() {
+ MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(),
+ Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+ final IndexMetaData metaData = newIndexMeta("foo", Settings.builder()
+ .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_2_0_0_beta1)
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("1.7.0"))
+ .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE,
+ Version.CURRENT.luceneVersion.toString()).build());
+ String message = expectThrows(IllegalStateException.class, () -> service.upgradeIndexMetaData(metaData)).getMessage();
+ assertEquals(message, "The index [[foo/BOOM]] was created before v2.0.0.beta1. It should be reindexed in Elasticsearch 2.x " +
+ "before upgrading to " + Version.CURRENT.toString() + ".");
+
+ IndexMetaData goodMeta = newIndexMeta("foo", Settings.builder()
+ .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_2_0_0_beta1)
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.1.0"))
+ .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE,
+ Version.CURRENT.luceneVersion.toString()).build());
+ service.upgradeIndexMetaData(goodMeta);
+ }
+
public static IndexMetaData newIndexMeta(String name, Settings indexSettings) {
Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
index 744477d672..01110e796e 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
@@ -49,6 +49,10 @@ public class WildcardExpressionResolverTests extends ESTestCase {
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")));
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testYYY"))), equalTo(newHashSet("testXXX", "testYYY")));
+ assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))).size(), equalTo(0));
+ assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testY*"))), equalTo(newHashSet("testXXX", "testYYY")));
+ assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))).size(), equalTo(0));
}
public void testConvertWildcardsTests() {
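The new assertions pin down '+' and '-' prefixes in expression lists: '+' includes matches, '-' removes previously included matches, and bare names include themselves. A standalone, illustrative-only model of that set algebra (this is not the ES resolver, which handles further edge cases):

    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.regex.Pattern;

    class WildcardSketch {
        static Set<String> resolve(Set<String> known, List<String> expressions) {
            Set<String> result = new LinkedHashSet<>();
            for (String expression : expressions) {
                boolean exclude = expression.startsWith("-");
                boolean prefixed = exclude || expression.startsWith("+");
                String glob = prefixed ? expression.substring(1) : expression;
                Pattern regex = Pattern.compile(glob.replace("*", ".*"));
                for (String name : known) {
                    if (regex.matcher(name).matches()) {
                        if (exclude) result.remove(name); else result.add(name);
                    }
                }
            }
            return result;
        }

        public static void main(String[] args) {
            Set<String> known = new LinkedHashSet<>(Arrays.asList("testXXX", "testXYY", "testYYY", "kuku"));
            System.out.println(resolve(known, Arrays.asList("testXXX", "+testY*"))); // [testXXX, testYYY]
            System.out.println(resolve(known, Arrays.asList("testXXX", "-testX*"))); // []
        }
    }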
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java
index 9daee81219..036c168eee 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -38,7 +39,7 @@ import static org.hamcrest.Matchers.nullValue;
public class AllocationIdTests extends ESTestCase {
public void testShardToStarted() {
logger.info("-- create unassigned shard");
- ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
+ ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
assertThat(shard.allocationId(), nullValue());
logger.info("-- initialize the shard");
@@ -58,7 +59,7 @@ public class AllocationIdTests extends ESTestCase {
public void testSuccessfulRelocation() {
logger.info("-- build started shard");
- ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
+ ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard = shard.initialize("node1", null, -1);
shard = shard.moveToStarted();
@@ -81,7 +82,7 @@ public class AllocationIdTests extends ESTestCase {
public void testCancelRelocation() {
logger.info("-- build started shard");
- ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
+ ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard = shard.initialize("node1", null, -1);
shard = shard.moveToStarted();
@@ -101,7 +102,7 @@ public class AllocationIdTests extends ESTestCase {
public void testMoveToUnassigned() {
logger.info("-- build started shard");
- ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
+ ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard = shard.initialize("node1", null, -1);
shard = shard.moveToStarted();
@@ -112,13 +113,13 @@ public class AllocationIdTests extends ESTestCase {
public void testReinitializing() {
logger.info("-- build started shard");
- ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
+ ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard = shard.initialize("node1", null, -1);
shard = shard.moveToStarted();
AllocationId allocationId = shard.allocationId();
logger.info("-- reinitializing shard");
- shard = shard.reinitializeShard();
+ shard = shard.reinitializePrimaryShard();
assertThat(shard.allocationId().getId(), notNullValue());
assertThat(shard.allocationId().getRelocationId(), nullValue());
assertThat(shard.allocationId().getId(), not(equalTo(allocationId.getId())));
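These hunks trace the renamed shard lifecycle: an unassigned primary now carries an explicit StoreRecoverySource (and no allocation id yet), initialize() assigns one, and reinitializePrimaryShard (formerly reinitializeShard) issues a fresh id. Sketched end to end (wrapper class invented):

    import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.UnassignedInfo;
    import org.elasticsearch.index.shard.ShardId;

    class AllocationIdLifecycleSketch {
        static ShardRouting cycle() {
            ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test", "_na_", 0), true,
                StoreRecoverySource.EXISTING_STORE_INSTANCE,
                new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
            shard = shard.initialize("node1", null, -1); // allocation id assigned here
            shard = shard.moveToStarted();
            return shard.reinitializePrimaryShard();     // fresh id, back to initializing
        }
    }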
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java
index 8ce039e926..a700358384 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java
@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -32,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
@@ -92,15 +92,11 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
.build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
+ clusterState = allocationService.reroute(clusterState, "reroute");
// starting primaries
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
ClusterState prevState = clusterState;
// remove node2 and reroute
@@ -110,8 +106,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
nodes.add(newNode("node3"));
}
clusterState = ClusterState.builder(clusterState).nodes(nodes).build();
- clusterState = ClusterState.builder(clusterState).routingResult(
- allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build();
+ clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute");
ClusterState newState = clusterState;
List<ShardRouting> unassignedShards = newState.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED);
if (nodeAvailableForAllocation) {
@@ -141,15 +136,11 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.build();
final long baseTimestampNanos = System.nanoTime();
allocationService.setNanoTimeOverride(baseTimestampNanos);
- clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
+ clusterState = allocationService.reroute(clusterState, "reroute");
// starting primaries
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertFalse("no shards should be unassigned", clusterState.getRoutingNodes().unassigned().size() > 0);
String nodeId = null;
final List<ShardRouting> allShards = clusterState.getRoutingTable().allShards("test");
@@ -164,8 +155,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
// remove node that has replica and reroute
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(nodeId)).build();
- clusterState = ClusterState.builder(clusterState).routingResult(
- allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build();
+ clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute");
ClusterState stateWithDelayedShard = clusterState;
// make sure the replica is marked as delayed (i.e. not reallocated)
assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard));
@@ -238,15 +228,11 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.add(newNode("node0", singleton(DiscoveryNode.Role.MASTER))).localNodeId("node0").masterNodeId("node0")
.add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
// allocate shards
- clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
+ clusterState = allocationService.reroute(clusterState, "reroute");
// start primaries
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// start replicas
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat("all shards should be started", clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4));
// find replica of short_delay
@@ -280,8 +266,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.build();
// make sure both replicas are marked as delayed (i.e. not reallocated)
allocationService.setNanoTimeOverride(baseTimestampNanos);
- clusterState = ClusterState.builder(clusterState).routingResult(
- allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build();
+ clusterState = allocationService.deassociateDeadNodes(clusterState, true, "reroute");
final ClusterState stateWithDelayedShards = clusterState;
assertEquals(2, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShards));
RoutingNodes.UnassignedShards.UnassignedIterator iter = stateWithDelayedShards.getRoutingNodes().unassigned().iterator();
@@ -400,15 +385,11 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.build();
final long nodeLeftTimestampNanos = System.nanoTime();
allocationService.setNanoTimeOverride(nodeLeftTimestampNanos);
- clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
+ clusterState = allocationService.reroute(clusterState, "reroute");
// starting primaries
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- clusterState = ClusterState.builder(clusterState)
- .routingResult(allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)))
- .build();
+ clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertFalse("no shards should be unassigned", clusterState.getRoutingNodes().unassigned().size() > 0);
String nodeIdOfFooReplica = null;
for (ShardRouting shardRouting : clusterState.getRoutingTable().allShards("foo")) {
@@ -422,8 +403,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
// remove node that has replica and reroute
clusterState = ClusterState.builder(clusterState).nodes(
DiscoveryNodes.builder(clusterState.nodes()).remove(nodeIdOfFooReplica)).build();
- clusterState = ClusterState.builder(clusterState).routingResult(
- allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build();
+ clusterState = allocationService.deassociateDeadNodes(clusterState, true, "fake node left");
ClusterState stateWithDelayedShard = clusterState;
// make sure the replica is marked as delayed (i.e. not reallocated)
assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard));
@@ -466,8 +446,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
// remove node that has replica and reroute
clusterState = ClusterState.builder(stateWithDelayedShard).nodes(
DiscoveryNodes.builder(stateWithDelayedShard.nodes()).remove(nodeIdOfBarReplica)).build();
- ClusterState stateWithShorterDelay = ClusterState.builder(clusterState).routingResult(
- allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build();
+ ClusterState stateWithShorterDelay = allocationService.deassociateDeadNodes(clusterState, true, "fake node left");
delayedAllocationService.setNanoTimeOverride(clusterChangeEventTimestampNanos);
delayedAllocationService.clusterChanged(
new ClusterChangedEvent("fake node left", stateWithShorterDelay, stateWithDelayedShard));
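Every hunk in this file applies the same mechanical change: the AllocationService methods used here (reroute, applyStartedShards, deassociateDeadNodes) now return the updated ClusterState directly, instead of a RoutingAllocation.Result that each caller had to fold back in by hand. A before/after sketch of the pattern:

    // before: wrap the routing result into a new state manually
    clusterState = ClusterState.builder(clusterState)
        .routingResult(allocationService.reroute(clusterState, "reroute"))
        .build();

    // after: the service builds and returns the new state itself
    clusterState = allocationService.reroute(clusterState, "reroute");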
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
index 9077f9923e..8bc9c29bb3 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
@@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
@@ -37,6 +38,7 @@ import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect;
import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions;
+import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
import java.util.Arrays;
@@ -153,7 +155,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)).get();
logger.info("--> wait until shard is failed and becomes unassigned again");
- assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()));
+ assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()));
assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
}
@@ -186,10 +188,15 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
// it's possible that the shard has not completed initialization, even though the cluster health is yellow, so the
// search can throw an "all shards failed" exception. We will wait until the shard initialization has completed before
// verifying the search hit count.
- assertBusy(() -> assertTrue(clusterService().state().routingTable().index(idxName).allPrimaryShardsActive()));
-
+ assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get()
+ .getState().routingTable().index(idxName).allPrimaryShardsActive()));
}
assertHitCount(client().prepareSearch(idxName).setSize(0).setQuery(matchAllQuery()).get(), useStaleReplica ? 1L : 0L);
+
+ // allocation id of old primary was cleaned from the in-sync set
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ assertEquals(Collections.singleton(state.routingTable().index(idxName).shard(0).primaryShard().allocationId().getId()),
+ state.metaData().index(idxName).inSyncAllocationIds(0));
}
public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() throws ExecutionException, InterruptedException {
@@ -204,6 +211,59 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
ensureGreen("test");
}
+ public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception {
+ internalCluster().startMasterOnlyNode(Settings.EMPTY);
+ internalCluster().startDataOnlyNode(Settings.EMPTY);
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get());
+ String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
+ ensureGreen("test");
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
+ ensureYellow("test");
+ assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size());
+ internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
+ @Override
+ public boolean clearData(String nodeName) {
+ return true;
+ }
+ });
+ logger.info("--> wait until shard is failed and becomes unassigned again");
+ assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()));
+ assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size());
+
+ logger.info("--> starting node that reuses data folder with the up-to-date shard");
+ internalCluster().startDataOnlyNode(Settings.EMPTY);
+ ensureGreen("test");
+ }
+
+ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception {
+ internalCluster().startMasterOnlyNode(Settings.EMPTY);
+ internalCluster().startDataOnlyNode(Settings.EMPTY);
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get());
+ String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
+ ensureGreen("test");
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
+ ensureYellow("test");
+ assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size());
+ logger.info("--> indexing...");
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+ assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size());
+ internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
+ @Override
+ public boolean clearData(String nodeName) {
+ return true;
+ }
+ });
+ logger.info("--> wait until shard is failed and becomes unassigned again");
+ assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()));
+ assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size());
+
+ logger.info("--> starting node that reuses data folder with the up-to-date shard");
+ internalCluster().startDataOnlyNode(Settings.EMPTY);
+ assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()));
+ }
+
public void testNotWaitForQuorumCopies() throws Exception {
logger.info("--> starting 3 nodes");
internalCluster().startNodesAsync(3).get();
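The two new tests pin down when an allocation id leaves the in-sync set: a node merely leaving the cluster does not remove its copy's id (so a returning copy can be reused and the index goes green again), whereas a write acknowledged while the replica is gone shrinks the set, after which the stale copy is no longer eligible and the shard stays unassigned. A sketch of the invariant being asserted, using the same state-API calls as above:

    ClusterState state = client().admin().cluster().prepareState().get().getState();
    Set<String> inSync = state.metaData().index("test").inSyncAllocationIds(0);
    // node leave alone: both ids still present (size 2)
    // node leave followed by an indexed document: only the primary's id remains (size 1)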
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java
index 9e81f81e43..863a33b132 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java
@@ -21,18 +21,18 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.node.DiscoveryNodes.Builder;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -41,13 +41,12 @@ import java.util.Set;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
public class PrimaryTermsTests extends ESAllocationTestCase {
private static final String TEST_INDEX_1 = "test1";
private static final String TEST_INDEX_2 = "test2";
- private RoutingTable testRoutingTable;
private int numberOfShards;
private int numberOfReplicas;
private static final Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
@@ -64,7 +63,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_initial_primaries_recoveries", Integer.MAX_VALUE)
.build());
this.numberOfShards = randomIntBetween(1, 5);
- this.numberOfReplicas = randomIntBetween(1, 5);
+ this.numberOfReplicas = randomIntBetween(0, 5);
logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas);
this.primaryTermsPerIndex.clear();
MetaData metaData = MetaData.builder()
@@ -72,7 +71,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
.put(createIndexMetaData(TEST_INDEX_2))
.build();
- this.testRoutingTable = new RoutingTable.Builder()
+ RoutingTable routingTable = new RoutingTable.Builder()
.add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_1).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_1))
.build())
.add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_2).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_2))
@@ -80,7 +79,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
.build();
this.clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
- .metaData(metaData).routingTable(testRoutingTable).build();
+ .metaData(metaData).routingTable(routingTable).build();
}
/**
@@ -93,9 +92,8 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
discoBuilder = discoBuilder.add(newNode("node" + i));
}
this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build();
- RoutingAllocation.Result rerouteResult = allocationService.reroute(clusterState, "reroute");
- this.testRoutingTable = rerouteResult.routingTable();
- assertThat(rerouteResult.changed(), is(true));
+ ClusterState rerouteResult = allocationService.reroute(clusterState, "reroute");
+ assertThat(rerouteResult, not(equalTo(this.clusterState)));
applyRerouteResult(rerouteResult);
primaryTermsPerIndex.keySet().forEach(this::incrementPrimaryTerm);
}
@@ -112,17 +110,16 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
}
private boolean startInitializingShards(String index) {
- this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
final List<ShardRouting> startedShards = this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING);
logger.info("start primary shards for index [{}]: {} ", index, startedShards);
- RoutingAllocation.Result rerouteResult = allocationService.applyStartedShards(this.clusterState, startedShards);
+ ClusterState rerouteResult = allocationService.applyStartedShards(this.clusterState, startedShards);
+ boolean changed = rerouteResult.equals(this.clusterState) == false;
applyRerouteResult(rerouteResult);
- return rerouteResult.changed();
+ return changed;
}
- private void applyRerouteResult(RoutingAllocation.Result rerouteResult) {
+ private void applyRerouteResult(ClusterState newClusterState) {
ClusterState previousClusterState = this.clusterState;
- ClusterState newClusterState = ClusterState.builder(previousClusterState).routingResult(rerouteResult).build();
ClusterState.Builder builder = ClusterState.builder(newClusterState).incrementVersion();
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1)
@@ -132,7 +129,6 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
}
this.clusterState = builder.build();
- this.testRoutingTable = rerouteResult.routingTable();
final ClusterStateHealth clusterHealth = new ClusterStateHealth(clusterState);
logger.info("applied reroute. active shards: p [{}], t [{}], init shards: [{}], relocating: [{}]",
clusterHealth.getActivePrimaryShards(), clusterHealth.getActiveShards(),
@@ -140,20 +136,18 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
}
private void failSomePrimaries(String index) {
- this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
- final IndexRoutingTable indexShardRoutingTable = testRoutingTable.index(index);
+ final IndexRoutingTable indexShardRoutingTable = clusterState.routingTable().index(index);
Set<Integer> shardIdsToFail = new HashSet<>();
for (int i = 1 + randomInt(numberOfShards - 1); i > 0; i--) {
shardIdsToFail.add(randomInt(numberOfShards - 1));
}
logger.info("failing primary shards {} for index [{}]", shardIdsToFail, index);
- List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>();
+ List<FailedShard> failedShards = new ArrayList<>();
for (int shard : shardIdsToFail) {
- failedShards.add(new FailedRerouteAllocation.FailedShard(indexShardRoutingTable.shard(shard).primaryShard(), "test", null));
+ failedShards.add(new FailedShard(indexShardRoutingTable.shard(shard).primaryShard(), "test", null));
incrementPrimaryTerm(index, shard); // the primary failure should increment the primary term
}
- RoutingAllocation.Result rerouteResult = allocationService.applyFailedShards(this.clusterState, failedShards);
- applyRerouteResult(rerouteResult);
+ applyRerouteResult(allocationService.applyFailedShards(this.clusterState, failedShards, Collections.emptyList()));
}
private void addNodes() {
@@ -164,8 +158,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
nodesBuilder.add(newNode("extra_" + i));
}
this.clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
- RoutingAllocation.Result rerouteResult = allocationService.reroute(this.clusterState, "nodes added");
- applyRerouteResult(rerouteResult);
+ applyRerouteResult(allocationService.reroute(this.clusterState, "nodes added"));
}
@@ -189,7 +182,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
private void assertPrimaryTerm(String index) {
final long[] terms = primaryTermsPerIndex.get(index);
final IndexMetaData indexMetaData = clusterState.metaData().index(index);
- for (IndexShardRoutingTable shardRoutingTable : this.testRoutingTable.index(index)) {
+ for (IndexShardRoutingTable shardRoutingTable : this.clusterState.routingTable().index(index)) {
final int shard = shardRoutingTable.shardId().id();
assertThat("primary term mismatch between indexMetaData of [" + index + "] and shard [" + shard + "]'s routing",
indexMetaData.primaryTerm(shard), equalTo(terms[shard]));
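This file also picks up the promotion of FailedRerouteAllocation.FailedShard to a top-level FailedShard, and applyFailedShards now takes an additional list (assumed here to be shards started in the same batch; passed empty in this test). As used above:

    List<FailedShard> failedShards = new ArrayList<>();
    failedShards.add(new FailedShard(indexShardRoutingTable.shard(shard).primaryShard(), "test", null));
    clusterState = allocationService.applyFailedShards(clusterState, failedShards, Collections.emptyList());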
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java
index a470ca9f9a..c3064c7fa9 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java
@@ -37,7 +37,8 @@ public final class RandomShardRoutingMutator {
if (shardRouting.unassigned() == false && shardRouting.primary() == false) {
shardRouting = shardRouting.moveToUnassigned(new UnassignedInfo(randomReason(), randomAsciiOfLength(10)));
} else if (shardRouting.unassignedInfo() != null) {
- shardRouting = shardRouting.updateUnassignedInfo(new UnassignedInfo(randomReason(), randomAsciiOfLength(10)));
+ shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(randomReason(), randomAsciiOfLength(10)),
+ shardRouting.recoverySource());
}
break;
case 1:
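The mutator reflects that updateUnassignedInfo became updateUnassigned, which re-supplies the recovery source alongside the new unassigned info, since both now travel together on an unassigned shard:

    shardRouting = shardRouting.updateUnassigned(
        new UnassignedInfo(randomReason(), randomAsciiOfLength(10)),
        shardRouting.recoverySource()); // keep the existing recovery source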
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RecoverySourceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RecoverySourceTests.java
new file mode 100644
index 0000000000..1929c15f7d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RecoverySourceTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+public class RecoverySourceTests extends ESTestCase {
+
+ public void testSerialization() throws IOException {
+ RecoverySource recoverySource = TestShardRouting.randomRecoverySource();
+ BytesStreamOutput out = new BytesStreamOutput();
+ recoverySource.writeTo(out);
+ RecoverySource serializedRecoverySource = RecoverySource.readFrom(out.bytes().streamInput());
+ assertEquals(recoverySource.getType(), serializedRecoverySource.getType());
+ assertEquals(recoverySource, serializedRecoverySource);
+ }
+
+ public void testRecoverySourceTypeOrder() {
+ assertEquals(RecoverySource.Type.EMPTY_STORE.ordinal(), 0);
+ assertEquals(RecoverySource.Type.EXISTING_STORE.ordinal(), 1);
+ assertEquals(RecoverySource.Type.PEER.ordinal(), 2);
+ assertEquals(RecoverySource.Type.SNAPSHOT.ordinal(), 3);
+ assertEquals(RecoverySource.Type.LOCAL_SHARDS.ordinal(), 4);
+ // check exhaustiveness
+ for (RecoverySource.Type type : RecoverySource.Type.values()) {
+ assertThat(type.ordinal(), greaterThanOrEqualTo(0));
+ assertThat(type.ordinal(), lessThanOrEqualTo(4));
+ }
+ }
+}
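Pinning enum ordinals in a test is worthwhile when the value is serialized by ordinal: reordering or inserting a constant would silently change the wire format between versions. A hedged sketch of the encoding this guards (assuming the type is written as a single ordinal byte, which the test implies but does not show):

    out.writeByte((byte) recoverySource.getType().ordinal());               // assumed write side
    RecoverySource.Type type = RecoverySource.Type.values()[in.readByte()]; // assumed read side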
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java
index 4006ed0e1d..04277ba1eb 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java
@@ -20,7 +20,7 @@
package org.elasticsearch.cluster.routing;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.junit.Before;
import java.util.concurrent.atomic.AtomicBoolean;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableGenerator.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableGenerator.java
index 62002ad1f9..f53cce07da 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableGenerator.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableGenerator.java
@@ -45,13 +45,13 @@ public class RoutingTableGenerator {
switch (state) {
case STARTED:
return TestShardRouting.newShardRouting(index, shardId, "node_" + Integer.toString(node_id++),
- null, null, primary, ShardRoutingState.STARTED);
+ null, primary, ShardRoutingState.STARTED);
case INITIALIZING:
return TestShardRouting.newShardRouting(index, shardId, "node_" + Integer.toString(node_id++),
- null, null, primary, ShardRoutingState.INITIALIZING);
+ null, primary, ShardRoutingState.INITIALIZING);
case RELOCATING:
return TestShardRouting.newShardRouting(index, shardId, "node_" + Integer.toString(node_id++),
- "node_" + Integer.toString(node_id++), null, primary, ShardRoutingState.RELOCATING);
+ "node_" + Integer.toString(node_id++), primary, ShardRoutingState.RELOCATING);
default:
throw new ElasticsearchException("Unknown state: " + state.name());
}
@@ -62,11 +62,11 @@ public class RoutingTableGenerator {
final String index = indexMetaData.getIndex().getName();
IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(new ShardId(index, "_na_", shardId));
ShardRouting shardRouting = genShardRouting(index, shardId, true);
- counter.update(shardRouting, indexMetaData);
+ counter.update(shardRouting);
builder.addShard(shardRouting);
for (int replicas = indexMetaData.getNumberOfReplicas(); replicas > 0; replicas--) {
shardRouting = genShardRouting(index, shardId, false);
- counter.update(shardRouting, indexMetaData);
+ counter.update(shardRouting);
builder.addShard(shardRouting);
}
@@ -104,7 +104,7 @@ public class RoutingTableGenerator {
return ClusterHealthStatus.GREEN;
}
- public void update(ShardRouting shardRouting, IndexMetaData indexMetaData) {
+ public void update(ShardRouting shardRouting) {
if (shardRouting.active()) {
active++;
if (shardRouting.primary()) {
@@ -119,7 +119,7 @@ public class RoutingTableGenerator {
if (shardRouting.primary()) {
primaryInactive++;
if (inactivePrimaryCausesRed == false) {
- inactivePrimaryCausesRed = getInactivePrimaryHealth(shardRouting, indexMetaData) == ClusterHealthStatus.RED;
+ inactivePrimaryCausesRed = getInactivePrimaryHealth(shardRouting) == ClusterHealthStatus.RED;
}
}
if (shardRouting.initializing()) {
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java
index 0f3ad8001c..6ed42ee45a 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java
@@ -21,20 +21,24 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.node.DiscoveryNodes.Builder;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before;
+import java.util.Set;
+import java.util.stream.Collectors;
+
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
public class RoutingTableTests extends ESAllocationTestCase {
@@ -42,7 +46,6 @@ public class RoutingTableTests extends ESAllocationTestCase {
private static final String TEST_INDEX_1 = "test1";
private static final String TEST_INDEX_2 = "test2";
private RoutingTable emptyRoutingTable;
- private RoutingTable testRoutingTable;
private int numberOfShards;
private int numberOfReplicas;
private int shardsPerIndex;
@@ -69,7 +72,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
.put(createIndexMetaData(TEST_INDEX_2))
.build();
- this.testRoutingTable = new RoutingTable.Builder()
+ RoutingTable testRoutingTable = new RoutingTable.Builder()
.add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_1).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_1)).build())
.add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_2).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_2)).build())
.build();
@@ -86,18 +89,14 @@ public class RoutingTableTests extends ESAllocationTestCase {
discoBuilder = discoBuilder.add(newNode("node" + i));
}
this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build();
- RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.reroute(clusterState, "reroute");
- this.testRoutingTable = rerouteResult.routingTable();
- assertThat(rerouteResult.changed(), is(true));
- this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ ClusterState rerouteResult = ALLOCATION_SERVICE.reroute(clusterState, "reroute");
+ assertThat(rerouteResult, not(equalTo(this.clusterState)));
+ this.clusterState = rerouteResult;
}
private void startInitializingShards(String index) {
- this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
logger.info("start primary shards for index {}", index);
- RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING));
- this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
- this.testRoutingTable = rerouteResult.routingTable();
+ this.clusterState = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING));
}
private IndexMetaData.Builder createIndexMetaData(String indexName) {
@@ -109,11 +108,11 @@ public class RoutingTableTests extends ESAllocationTestCase {
public void testAllShards() {
assertThat(this.emptyRoutingTable.allShards().size(), is(0));
- assertThat(this.testRoutingTable.allShards().size(), is(this.totalNumberOfShards));
+ assertThat(this.clusterState.routingTable().allShards().size(), is(this.totalNumberOfShards));
- assertThat(this.testRoutingTable.allShards(TEST_INDEX_1).size(), is(this.shardsPerIndex));
+ assertThat(this.clusterState.routingTable().allShards(TEST_INDEX_1).size(), is(this.shardsPerIndex));
try {
- assertThat(this.testRoutingTable.allShards("not_existing").size(), is(0));
+ assertThat(this.clusterState.routingTable().allShards("not_existing").size(), is(0));
fail("Exception expected when calling allShards() with non existing index name");
} catch (IndexNotFoundException e) {
// expected
@@ -121,69 +120,69 @@ public class RoutingTableTests extends ESAllocationTestCase {
}
public void testHasIndex() {
- assertThat(this.testRoutingTable.hasIndex(TEST_INDEX_1), is(true));
- assertThat(this.testRoutingTable.hasIndex("foobar"), is(false));
+ assertThat(clusterState.routingTable().hasIndex(TEST_INDEX_1), is(true));
+ assertThat(clusterState.routingTable().hasIndex("foobar"), is(false));
}
public void testIndex() {
- assertThat(this.testRoutingTable.index(TEST_INDEX_1).getIndex().getName(), is(TEST_INDEX_1));
- assertThat(this.testRoutingTable.index("foobar"), is(nullValue()));
+ assertThat(clusterState.routingTable().index(TEST_INDEX_1).getIndex().getName(), is(TEST_INDEX_1));
+ assertThat(clusterState.routingTable().index("foobar"), is(nullValue()));
}
public void testIndicesRouting() {
- assertThat(this.testRoutingTable.indicesRouting().size(), is(2));
- assertThat(this.testRoutingTable.getIndicesRouting().size(), is(2));
- assertSame(this.testRoutingTable.getIndicesRouting(), this.testRoutingTable.indicesRouting());
+ assertThat(clusterState.routingTable().indicesRouting().size(), is(2));
+ assertThat(clusterState.routingTable().getIndicesRouting().size(), is(2));
+ assertSame(clusterState.routingTable().getIndicesRouting(), clusterState.routingTable().indicesRouting());
}
public void testShardsWithState() {
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards));
initPrimaries();
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - 2 * this.numberOfShards));
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).size(), is(2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - 2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(2 * this.numberOfShards));
startInitializingShards(TEST_INDEX_1);
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.STARTED).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.STARTED).size(), is(this.numberOfShards));
int initializingExpected = this.numberOfShards + this.numberOfShards * this.numberOfReplicas;
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected));
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - this.numberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - this.numberOfShards));
startInitializingShards(TEST_INDEX_2);
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.STARTED).size(), is(2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.STARTED).size(), is(2 * this.numberOfShards));
initializingExpected = 2 * this.numberOfShards * this.numberOfReplicas;
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected));
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - 2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - 2 * this.numberOfShards));
// now start all replicas too
startInitializingShards(TEST_INDEX_1);
startInitializingShards(TEST_INDEX_2);
- assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.STARTED).size(), is(this.totalNumberOfShards));
+ assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.STARTED).size(), is(this.totalNumberOfShards));
}
public void testActivePrimaryShardsGrouped() {
assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], true).size(), is(0));
assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0));
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
initPrimaries();
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
startInitializingShards(TEST_INDEX_1);
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards));
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
startInitializingShards(TEST_INDEX_2);
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards));
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
- assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(2 * this.numberOfShards));
try {
- this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true);
+ clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true);
fail("Calling with non-existing index name should raise IndexMissingException");
} catch (IndexNotFoundException e) {
// expected
@@ -194,43 +193,43 @@ public class RoutingTableTests extends ESAllocationTestCase {
assertThat(this.emptyRoutingTable.allActiveShardsGrouped(new String[0], true).size(), is(0));
assertThat(this.emptyRoutingTable.allActiveShardsGrouped(new String[0], false).size(), is(0));
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
initPrimaries();
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
startInitializingShards(TEST_INDEX_1);
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards));
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
startInitializingShards(TEST_INDEX_2);
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards));
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
- assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards));
try {
- this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true);
+ clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true);
} catch (IndexNotFoundException e) {
fail("Calling with non-existing index should be ignored at the moment");
}
}
public void testAllAssignedShardsGrouped() {
- assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
- assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
initPrimaries();
- assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
- assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
+ assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
- assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
- assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards));
+ assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
+ assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards));
try {
- this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, false);
+ clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, false);
} catch (IndexNotFoundException e) {
fail("Calling with non-existing index should be ignored at the moment");
}
@@ -239,19 +238,19 @@ public class RoutingTableTests extends ESAllocationTestCase {
public void testAllShardsForMultipleIndices() {
assertThat(this.emptyRoutingTable.allShards(new String[0]).size(), is(0));
- assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex));
initPrimaries();
- assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex));
startInitializingShards(TEST_INDEX_1);
- assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex));
+ assertThat(clusterState.routingTable().allShards(new String[]{TEST_INDEX_1}).size(), is(this.shardsPerIndex));
startInitializingShards(TEST_INDEX_2);
- assertThat(this.testRoutingTable.allShards(new String[]{TEST_INDEX_1, TEST_INDEX_2}).size(), is(this.totalNumberOfShards));
+ assertThat(clusterState.routingTable().allShards(new String[]{TEST_INDEX_1, TEST_INDEX_2}).size(), is(this.totalNumberOfShards));
try {
- this.testRoutingTable.allShards(new String[]{TEST_INDEX_1, "not_exists"});
+ clusterState.routingTable().allShards(new String[]{TEST_INDEX_1, "not_exists"});
} catch (IndexNotFoundException e) {
fail("Calling with non-existing index should be ignored at the moment");
}
@@ -296,10 +295,11 @@ public class RoutingTableTests extends ESAllocationTestCase {
.numberOfShards(numShards)
.numberOfReplicas(numReplicas)
.build();
- MetaData metaData = MetaData.builder().put(indexMetaData, true).build();
final RoutingTableGenerator routingTableGenerator = new RoutingTableGenerator();
final RoutingTableGenerator.ShardCounter counter = new RoutingTableGenerator.ShardCounter();
final IndexRoutingTable indexRoutingTable = routingTableGenerator.genIndexRoutingTable(indexMetaData, counter);
+ indexMetaData = updateActiveAllocations(indexRoutingTable, indexMetaData);
+ MetaData metaData = MetaData.builder().put(indexMetaData, true).build();
// test no validation errors
assertTrue(indexRoutingTable.validate(metaData));
// test wrong number of shards causes validation errors
@@ -327,4 +327,23 @@ public class RoutingTableTests extends ESAllocationTestCase {
final MetaData metaData4 = MetaData.builder().put(indexMetaData, true).build();
expectThrows(IllegalStateException.class, () -> indexRoutingTable.validate(metaData4));
}
+
+ /** Reverse-engineers the in-sync allocation ids from the given indexRoutingTable. */
+ public static IndexMetaData updateActiveAllocations(IndexRoutingTable indexRoutingTable, IndexMetaData indexMetaData) {
+ IndexMetaData.Builder imdBuilder = IndexMetaData.builder(indexMetaData);
+ for (IndexShardRoutingTable shardTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : shardTable) {
+ Set<String> insyncAids = shardTable.activeShards().stream().map(
+ shr -> shr.allocationId().getId()).collect(Collectors.toSet());
+ final ShardRouting primaryShard = shardTable.primaryShard();
+ if (primaryShard.initializing() && primaryShard.relocating() == false &&
+ RecoverySource.isInitialRecovery(primaryShard.recoverySource().getType()) == false) {
+ // simulate a primary was initialized based on aid
+ insyncAids.add(primaryShard.allocationId().getId());
+ }
+ imdBuilder.putInSyncAllocationIds(shardRouting.id(), insyncAids);
+ }
+ }
+ return imdBuilder.build();
+ }
}
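The updateActiveAllocations helper reverse-engineers in-sync allocation ids so that generated routing tables validate against their metadata. Usage follows the validation test above: build the routing table first, then derive the ids before assembling MetaData:

    IndexRoutingTable indexRoutingTable = routingTableGenerator.genIndexRoutingTable(indexMetaData, counter);
    indexMetaData = updateActiveAllocations(indexRoutingTable, indexMetaData);
    MetaData metaData = MetaData.builder().put(indexMetaData, true).build();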
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java
index a689acd04a..a4c8d0e424 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java
@@ -21,6 +21,8 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.Version;
import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.snapshots.Snapshot;
@@ -54,20 +56,6 @@ public class ShardRoutingTests extends ESTestCase {
assertFalse(unassignedShard0.isSameAllocation(startedShard1));
}
- public void testIsSameShard() {
- ShardRouting index1Shard0a = randomShardRouting("index1", 0);
- ShardRouting index1Shard0b = randomShardRouting("index1", 0);
- ShardRouting index1Shard1 = randomShardRouting("index1", 1);
- ShardRouting index2Shard0 = randomShardRouting("index2", 0);
- ShardRouting index2Shard1 = randomShardRouting("index2", 1);
-
- assertTrue(index1Shard0a.isSameShard(index1Shard0a));
- assertTrue(index1Shard0a.isSameShard(index1Shard0b));
- assertFalse(index1Shard0a.isSameShard(index1Shard1));
- assertFalse(index1Shard0a.isSameShard(index2Shard0));
- assertFalse(index1Shard0a.isSameShard(index2Shard1));
- }
-
private ShardRouting randomShardRouting(String index, int shard) {
ShardRoutingState state = randomFrom(ShardRoutingState.values());
return TestShardRouting.newShardRouting(index, shard, state == ShardRoutingState.UNASSIGNED ? null : "1", state != ShardRoutingState.UNASSIGNED && randomBoolean(), state);
@@ -126,39 +114,49 @@ public class ShardRoutingTests extends ESTestCase {
Integer[] changeIds = new Integer[]{0, 1, 2, 3, 4, 5, 6};
for (int changeId : randomSubsetOf(randomIntBetween(1, changeIds.length), changeIds)) {
+ boolean unchanged = false;
switch (changeId) {
case 0:
// change index
- otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName() + "a", otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
- otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
+ ShardId shardId = new ShardId(new Index("blubb", randomAsciiOfLength(10)), otherRouting.id());
+ otherRouting = new ShardRouting(shardId, otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
+ otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(),
+ otherRouting.allocationId(), otherRouting.getExpectedShardSize());
break;
case 1:
// change shard id
- otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id() + 1, otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
- otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
+ otherRouting = new ShardRouting(new ShardId(otherRouting.index(), otherRouting.id() + 1), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
+ otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(),
+ otherRouting.allocationId(), otherRouting.getExpectedShardSize());
break;
case 2:
// change current node
- otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId() + "_1", otherRouting.relocatingNodeId(),
- otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
+ otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId() + "_1", otherRouting.relocatingNodeId(),
+ otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(),
+ otherRouting.allocationId(), otherRouting.getExpectedShardSize());
break;
case 3:
// change relocating node
- otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(),
+ otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(),
otherRouting.relocatingNodeId() == null ? "1" : otherRouting.relocatingNodeId() + "_1",
- otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
+ otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(),
+ otherRouting.allocationId(), otherRouting.getExpectedShardSize());
break;
case 4:
- // change restore source
- otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
- otherRouting.restoreSource() == null ? new RestoreSource(new Snapshot("test", new SnapshotId("s1", UUIDs.randomBase64UUID())), Version.CURRENT, "test") :
- new RestoreSource(otherRouting.restoreSource().snapshot(), Version.CURRENT, otherRouting.index() + "_1"),
- otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
+ // change recovery source (only works for inactive primaries)
+ if (otherRouting.active() || otherRouting.primary() == false) {
+ unchanged = true;
+ } else {
+ otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
+ otherRouting.primary(), otherRouting.state(),
+ new RecoverySource.SnapshotRecoverySource(new Snapshot("test", new SnapshotId("s1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"),
+ otherRouting.unassignedInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize());
+ }
break;
case 5:
// change primary flag
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
- otherRouting.restoreSource(), otherRouting.primary() == false, otherRouting.state(), otherRouting.unassignedInfo());
+ otherRouting.primary() == false, otherRouting.state(), otherRouting.unassignedInfo());
break;
case 6:
// change state
@@ -173,20 +171,22 @@ public class ShardRoutingTests extends ESTestCase {
}
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
- otherRouting.restoreSource(), otherRouting.primary(), newState, unassignedInfo);
+ otherRouting.primary(), newState, unassignedInfo);
break;
}
if (randomBoolean()) {
// change unassigned info
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
- otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(),
+ otherRouting.primary(), otherRouting.state(),
otherRouting.unassignedInfo() == null ? new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") :
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, otherRouting.unassignedInfo().getMessage() + "_1"));
}
- logger.debug("comparing\nthis {} to\nother {}", routing, otherRouting);
- assertFalse("expected non-equality\nthis " + routing + ",\nother " + otherRouting, routing.equalsIgnoringMetaData(otherRouting));
+ if (unchanged == false) {
+ logger.debug("comparing\nthis {} to\nother {}", routing, otherRouting);
+ assertFalse("expected non-equality\nthis " + routing + ",\nother " + otherRouting, routing.equalsIgnoringMetaData(otherRouting));
+ }
}
}
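
Editor's note: the hunks above replace the removed restoreSource()-based TestShardRouting factory calls with direct ShardRouting construction carrying a RecoverySource. A minimal sketch of the constructor shape those call sites imply, shown for an unassigned primary restored from snapshot (the one case the diff itself exercises); parameter order and names are inferred from the call sites, not from the final class, so treat this as shape-only.

    // Sketch (not part of the diff): ShardRouting construction as migrated above.
    // The SnapshotRecoverySource arguments mirror the "case 4" hunk; "repo"/"snap"
    // are illustrative names.
    ShardRouting routing = new ShardRouting(
            new ShardId(new Index("test", UUIDs.randomBase64UUID()), 0),
            null,                                      // currentNodeId: none while unassigned
            null,                                      // relocatingNodeId
            true,                                      // primary
            ShardRoutingState.UNASSIGNED,
            new RecoverySource.SnapshotRecoverySource(
                    new Snapshot("repo", new SnapshotId("snap", UUIDs.randomBase64UUID())),
                    Version.CURRENT, "test"),
            new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test"),
            null,                                      // allocationId: assigned on initialize
            -1);                                       // expected shard size: unknown
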
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
index f35bd6d559..5eff8a0a53 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
@@ -24,12 +24,14 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -38,7 +40,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -69,7 +70,8 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
UnassignedInfo.Reason.REROUTE_CANCELLED,
UnassignedInfo.Reason.REINITIALIZED,
UnassignedInfo.Reason.REALLOCATED_REPLICA,
- UnassignedInfo.Reason.PRIMARY_FAILED};
+ UnassignedInfo.Reason.PRIMARY_FAILED,
+ UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY};
for (int i = 0; i < order.length; i++) {
assertThat(order[i].ordinal(), equalTo(i));
}
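
Editor's note: FORCED_EMPTY_PRIMARY is appended at the end of the expected order because this loop pins every Reason to its ordinal. A sketch of the contract being protected, assuming (as the pinned ordinals suggest) that the enum travels over the wire by ordinal; the stream classes here are plain java.io stand-ins, not the Elasticsearch stream API.

    // Sketch (not part of the diff): enums serialized by ordinal must only grow at the
    // end; inserting or reordering constants would silently corrupt old-version reads.
    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    class OrdinalWireSketch {
        enum Reason { INDEX_CREATED, CLUSTER_RECOVERED /* ..., new values appended last */ }

        static void write(DataOutput out, Reason reason) throws IOException {
            out.writeByte(reason.ordinal()); // stable only if existing ordinals never shift
        }

        static Reason read(DataInput in) throws IOException {
            return Reason.values()[in.readByte()]; // reader on the other side of the wire
        }
    }
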
@@ -136,7 +138,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
- .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), new IntHashSet()).build()).build();
+ .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), new IntHashSet()).build()).build();
for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NEW_INDEX_RESTORED));
}
@@ -148,7 +150,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
- .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()).build();
+ .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()).build();
for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED));
}
@@ -176,9 +178,9 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index(index)).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
// starting primaries
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index);
for (IndexShardRoutingTable indexShardRoutingTable : clusterState.routingTable().index(index)) {
builder.addIndexShard(indexShardRoutingTable);
@@ -194,7 +196,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
* The unassigned meta is kept when a shard goes to INITIALIZING, but cleared when it moves to STARTED.
*/
public void testStateTransitionMetaHandling() {
- ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, null, true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
+ ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
assertThat(shard.unassignedInfo(), notNullValue());
shard = shard.initialize("test_node", null, -1);
assertThat(shard.state(), equalTo(ShardRoutingState.INITIALIZING));
@@ -216,15 +218,15 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
// starting primaries
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
// remove node2 and reroute
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
// verify that NODE_LEAVE is the reason for meta
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1));
@@ -245,15 +247,15 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
// starting primaries
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
// fail shard
ShardRouting shardToFail = clusterState.getRoutingNodes().shardsWithState(STARTED).get(0);
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(shardToFail, "test fail", null)))).build();
+ clusterState = allocation.applyFailedShards(clusterState, Collections.singletonList(new FailedShard(shardToFail, "test fail", null)));
// verify the reason and details
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1));
@@ -295,17 +297,17 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
// starting primaries
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
// remove node2 and reroute
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
// make sure both replicas are marked as delayed (i.e. not reallocated)
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
assertThat(clusterState.prettyPrint(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2));
}
@@ -323,24 +325,24 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
// starting primaries
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
// remove node2 and reroute
final long baseTime = System.nanoTime();
allocation.setNanoTimeOverride(baseTime);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
final long delta = randomBoolean() ? 0 : randomInt((int) expectMinDelaySettingsNanos - 1);
if (delta > 0) {
allocation.setNanoTimeOverride(baseTime + delta);
- clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "time moved")).build();
+ clusterState = allocation.reroute(clusterState, "time moved");
}
assertThat(UnassignedInfo.findNextDelayedAllocation(baseTime + delta, clusterState), equalTo(expectMinDelaySettingsNanos - delta));
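
Editor's note: the mechanical rewrite running through all of these hunks is the same one: AllocationService methods now return the updated ClusterState directly instead of a RoutingAllocation.Result that callers folded back in via routingResult(...). A small sketch of the before/after idiom, reconstructed from the removed and added lines above.

    // Sketch (not part of the diff): the API shift applied throughout these tests.
    private static ClusterState startInitializingShards(AllocationService allocation, ClusterState clusterState) {
        // Old style (removed above):
        //   RoutingAllocation.Result result = allocation.applyStartedShards(
        //           clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
        //   return ClusterState.builder(clusterState).routingResult(result).build();
        // New style: the service hands back the input state itself when nothing changed,
        // which is what the equalTo / not(equalTo(...)) assertions in these tests lean on.
        return allocation.applyStartedShards(clusterState,
                clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    }
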
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java
deleted file mode 100644
index 9cfac5da16..0000000000
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java
+++ /dev/null
@@ -1,102 +0,0 @@
-package org.elasticsearch.cluster.routing.allocation;
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
-
-import java.util.Arrays;
-import java.util.HashSet;
-
-import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
-import static org.hamcrest.Matchers.equalTo;
-
-public class ActiveAllocationIdTests extends ESAllocationTestCase {
-
- public void testActiveAllocationIdsUpdated() {
- AllocationService allocation = createAllocationService();
-
- logger.info("creating an index with 1 shard, 2 replicas");
- MetaData metaData = MetaData.builder()
- .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
- // add index metadata where we have no routing nodes to check that allocation ids are not removed
- .put(IndexMetaData.builder("test-old").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)
- .putActiveAllocationIds(0, new HashSet<>(Arrays.asList("x", "y"))))
- .build();
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaData.index("test"))
- .build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
- .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
-
- logger.info("adding three nodes and performing rerouting");
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(
- newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
-
- assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(0));
- assertThat(clusterState.metaData().index("test-old").activeAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y"))));
-
- logger.info("start primary shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
-
- assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1));
- assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(1));
- assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).get(0).allocationId().getId(),
- equalTo(clusterState.metaData().index("test").activeAllocationIds(0).iterator().next()));
- assertThat(clusterState.metaData().index("test-old").activeAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y"))));
-
- logger.info("start replica shards");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
-
- assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(3));
-
- logger.info("remove a node");
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
- .remove("node1"))
- .build();
- rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
-
- assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(2));
-
- logger.info("remove all remaining nodes");
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
- .remove("node2").remove("node3"))
- .build();
- rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
-
- // active allocation ids should not be updated
- assertThat(clusterState.getRoutingTable().shardsWithState(UNASSIGNED).size(), equalTo(3));
- assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(2));
- }
-}
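
Editor's note: the deleted test above pinned "active allocation ids" in index metadata via putActiveAllocationIds. Elsewhere in this same diff (the AllocationCommandsTests hunk below) the metadata builder instead uses putInSyncAllocationIds, so the sketch below shows the deleted setup rewritten against that API, on the assumption that it is the direct replacement.

    // Sketch (not part of the diff): the deleted metadata setup, using the in-sync
    // allocation id API visible later in this diff.
    MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder("test-old")
                    .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)
                    .putInSyncAllocationIds(0, new HashSet<>(Arrays.asList("x", "y"))))
            .build();
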
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
index 687343b16d..ed7a944963 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -20,8 +20,10 @@
package org.elasticsearch.cluster.routing.allocation;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -30,11 +32,9 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
@@ -43,9 +43,11 @@ import java.util.Collections;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
public class AddIncrementallyTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(AddIncrementallyTests.class);
+ private final Logger logger = Loggers.getLogger(AddIncrementallyTests.class);
public void testAddNodesAndIndices() {
Settings.Builder settings = Settings.builder();
@@ -53,40 +55,40 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
AllocationService service = createAllocationService(settings.build());
ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(9));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(9));
int nodeOffset = 1;
clusterState = addNodes(clusterState, service, 1, nodeOffset++);
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(0));
- assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, equalTo(3));
clusterState = addNodes(clusterState, service, 1, nodeOffset++);
- assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, equalTo(2));
clusterState = addNodes(clusterState, service, 1, nodeOffset++);
assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
assertAtLeastOneIndexShardPerNode(clusterState);
clusterState = removeNodes(clusterState, service, 1);
- assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, equalTo(2));
clusterState = addIndex(clusterState, service, 3, 2, 3);
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(2));
- assertNumIndexShardsPerNode(clusterState, "test3", Matchers.equalTo(2));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, "test3", equalTo(2));
assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
clusterState = addIndex(clusterState, service, 4, 2, 3);
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(4));
- assertNumIndexShardsPerNode(clusterState, "test4", Matchers.equalTo(2));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(4));
+ assertNumIndexShardsPerNode(clusterState, "test4", equalTo(2));
assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
clusterState = addNodes(clusterState, service, 1, nodeOffset++);
assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0));
clusterState = removeNodes(clusterState, service, 1);
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(4));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(4));
assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
clusterState = addNodes(clusterState, service, 1, nodeOffset++);
assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0));
logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
}
@@ -97,65 +99,57 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
AllocationService service = createAllocationService(settings.build());
ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(9));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(9));
int nodeOffset = 1;
clusterState = addNodes(clusterState, service, 1, nodeOffset++);
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(0));
- assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, equalTo(3));
logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
nodes.add(newNode("node2"));
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
- RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = service.reroute(clusterState, "reroute");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
- RoutingTable prev = routingTable;
- logger.error(clusterState.prettyPrint());
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
-
- prev = routingTable;
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
-
- prev = routingTable;
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(4));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
-
- prev = routingTable;
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- assertThat(prev, Matchers.sameInstance(routingTable));
- assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, equalTo(clusterState));
+ assertNumIndexShardsPerNode(clusterState, equalTo(2));
logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
}
@@ -167,65 +161,57 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
AllocationService service = createAllocationService(settings.build());
ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(9));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(9));
int nodeOffset = 1;
clusterState = addNodes(clusterState, service, 1, nodeOffset++);
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
- assertThat(clusterState.getRoutingNodes().unassigned().size(), Matchers.equalTo(0));
- assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, equalTo(3));
logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
nodes.add(newNode("node2"));
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
- RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = service.reroute(clusterState, "reroute");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
- RoutingTable prev = routingTable;
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
-
- prev = routingTable;
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
-
- prev = routingTable;
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(4));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
- assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
- assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
-
- prev = routingTable;
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- assertThat(prev, Matchers.sameInstance(routingTable));
- assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, equalTo(clusterState));
+ assertNumIndexShardsPerNode(clusterState, equalTo(2));
logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
}
@@ -262,24 +248,10 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
- RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.reroute(clusterState, "reroute");
// move initializing to started
-
- RoutingTable prev = routingTable;
- while (true) {
- logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- if (routingTable == prev)
- break;
- prev = routingTable;
- }
-
- return clusterState;
+ return applyStartedShardsUntilNoChange(clusterState, service);
}
private ClusterState initCluster(AllocationService service, int numberOfNodes, int numberOfIndices, int numberOfShards,
@@ -299,43 +271,27 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
routingTableBuilder.addAsNew(cursor.value);
}
- RoutingTable routingTable = routingTableBuilder.build();
+ RoutingTable initialRoutingTable = routingTableBuilder.build();
logger.info("start {} nodes", numberOfNodes);
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
for (int i = 0; i < numberOfNodes; i++) {
nodes.add(newNode("node" + i));
}
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
- routingTable = service.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(initialRoutingTable).build();
+ clusterState = service.reroute(clusterState, "reroute");
logger.info("restart all the primary shards, replicas will start initializing");
- routingNodes = clusterState.getRoutingNodes();
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("start the replica shards");
routingNodes = clusterState.getRoutingNodes();
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
logger.info("complete rebalancing");
- RoutingTable prev = routingTable;
- while (true) {
- logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- if (routingTable == prev)
- break;
- prev = routingTable;
- }
-
- return clusterState;
+ return applyStartedShardsUntilNoChange(clusterState, service);
}
private ClusterState addIndex(ClusterState clusterState, AllocationService service, int indexOrdinal, int numberOfShards,
@@ -350,37 +306,19 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
routingTableBuilder.addAsNew(imd);
MetaData metaData = metaDataBuilder.build();
- RoutingTable routingTable = routingTableBuilder.build();
- clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
- routingTable = service.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTableBuilder.build()).build();
+ clusterState = service.reroute(clusterState, "reroute");
logger.info("restart all the primary shards, replicas will start initializing");
- routingNodes = clusterState.getRoutingNodes();
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("start the replica shards");
routingNodes = clusterState.getRoutingNodes();
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("complete rebalancing");
- RoutingTable prev = routingTable;
- while (true) {
- logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- if (routingTable == prev)
- break;
- prev = routingTable;
- }
-
- return clusterState;
+ return applyStartedShardsUntilNoChange(clusterState, service);
}
private ClusterState removeNodes(ClusterState clusterState, AllocationService service, int numNodes) {
@@ -397,36 +335,21 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
}
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
- clusterState = ClusterState.builder(clusterState)
- .routingResult(service.deassociateDeadNodes(clusterState, true, "reroute")).build();
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.deassociateDeadNodes(clusterState, true, "reroute");
logger.info("start all the primary shards, replicas will start initializing");
- RoutingTable routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("start the replica shards");
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("rebalancing");
- routingTable = service.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = service.reroute(clusterState, "reroute");
logger.info("complete rebalancing");
- RoutingTable prev = routingTable;
- while (true) {
- logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- if (routingTable == prev)
- break;
- prev = routingTable;
- }
+ clusterState = applyStartedShardsUntilNoChange(clusterState, service);
return clusterState;
}
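
Editor's note: several hunks above collapse a hand-rolled fixed-point loop into a single call to applyStartedShardsUntilNoChange, presumably a helper on the relocated ESAllocationTestCase. A sketch of what that helper must do, reconstructed from the deleted while-loops it replaces.

    // Sketch (not part of the diff): start INITIALIZING shards repeatedly until a pass
    // produces no routing change; an unchanged pass returns the same state instance.
    private ClusterState applyStartedShardsUntilNoChange(ClusterState clusterState, AllocationService service) {
        ClusterState lastClusterState;
        do {
            lastClusterState = clusterState;
            clusterState = service.applyStartedShards(clusterState,
                    clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
        } while (lastClusterState != clusterState);
        return clusterState;
    }
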
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
index 354b18d0b2..7dfa49455b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -41,7 +43,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -51,7 +52,8 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
-import org.elasticsearch.test.ESAllocationTestCase;
+
+import java.util.Collections;
import static java.util.Collections.singleton;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
@@ -59,12 +61,13 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
/**
*/
public class AllocationCommandsTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(AllocationCommandsTests.class);
+ private final Logger logger = Loggers.getLogger(AllocationCommandsTests.class);
public void testMoveShardCommand() {
AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -80,12 +83,10 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
logger.info("adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
logger.info("start primary shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("move the shard");
String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
@@ -95,15 +96,15 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
} else {
toNodeId = "node1";
}
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ ClusterState newState = allocation.reroute(clusterState,
+ new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node(existingNodeId).iterator().next().state(), equalTo(ShardRoutingState.RELOCATING));
assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
logger.info("finish moving the shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node(existingNodeId).isEmpty(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
@@ -126,7 +127,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
- .put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)
+ .putInSyncAllocationIds(0, Collections.singleton("asdf")).putInSyncAllocationIds(1, Collections.singleton("qwertz")))
.build();
// shard routing is added as "from recovery" instead of "new index creation" so that we can test below that allocating an empty
// primary with accept_data_loss flag set to false fails
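Seeding in-sync allocation IDs here is what makes the shard copies look like they hold recoverable data, which in turn is what lets the test verify below that allocating an empty primary without accept_data_loss is refused. The builder calls, with the arbitrary IDs from the hunk:

MetaData metaData = MetaData.builder()
    .put(IndexMetaData.builder(index).settings(settings(Version.CURRENT))
        .numberOfShards(1).numberOfReplicas(1)
        // arbitrary test values; their presence marks the copies as in-sync
        .putInSyncAllocationIds(0, Collections.singleton("asdf"))
        .putInSyncAllocationIds(1, Collections.singleton("qwertz")))
    .build();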
@@ -143,8 +145,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
.add(newNode("node3"))
.add(newNode("node4", singleton(DiscoveryNode.Role.MASTER)))
).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
logger.info("--> allocating to non-existent node, should fail");
@@ -196,16 +197,16 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
}
logger.info("--> allocating empty primary with acceptDataLoss flag set to true");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ ClusterState newState = allocation.reroute(clusterState,
+ new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
logger.info("--> start the primary shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
@@ -218,9 +219,10 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
}
logger.info("--> allocate the replica shard on on the second node");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
@@ -228,8 +230,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
logger.info("--> start the replica shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
@@ -264,14 +265,14 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
.add(newNode("node2"))
.add(newNode("node3"))
).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
logger.info("--> allocating empty primary shard with accept_data_loss flag set to true");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ ClusterState newState = allocation.reroute(clusterState,
+ new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
@@ -284,8 +285,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
}
logger.info("--> start the primary shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
@@ -298,27 +298,30 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
}
logger.info("--> allocate the replica shard on on the second node");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> cancel the relocation allocation");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false)), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false)), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0));
logger.info("--> allocate the replica shard on on the second node");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
@@ -332,41 +335,41 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
}
logger.info("--> start the replica shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
logger.info("--> cancel allocation of the replica shard");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false)), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false)), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0));
logger.info("--> allocate the replica shard on on the second node");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false);
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
- assertThat(rerouteResult.changed(), equalTo(true));
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the replica shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
logger.info("--> move the replica shard");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), false, false);
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState,
+ new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), false, false).getClusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
@@ -376,24 +379,25 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
if (randomBoolean()) {
logger.info("--> cancel the primary allocation (with allow_primary set to true)");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", true)), false, false);
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
- assertThat(rerouteResult.changed(), equalTo(true));
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", true)), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).iterator().next().primary(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0));
} else {
logger.info("--> cancel the move of the replica shard");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand("test", 0, "node3", false)), false, false);
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState,
+ new AllocationCommands(new CancelAllocationCommand("test", 0, "node3", false)), false, false).getClusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
logger.info("--> move the replica shard again");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), false, false);
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState,
+ new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), false, false).getClusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
@@ -402,8 +406,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> cancel the source replica shard");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false)), false, false);
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState,
+ new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false)), false, false).getClusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
@@ -412,17 +416,17 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState(INITIALIZING).get(0).relocatingNodeId(), nullValue());
logger.info("--> start the former target replica shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
- assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState(STARTED).size(), equalTo(1));
logger.info("--> cancel the primary allocation (with allow_primary set to true)");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", true)), false, false);
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
- assertThat(rerouteResult.changed(), equalTo(true));
+ newState = allocation.reroute(clusterState,
+ new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", true)), false, false).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState(STARTED).iterator().next().primary(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
@@ -442,8 +446,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
StreamInput in = bytes.bytes().streamInput();
// Since the commands are named writeable we need to register them and wrap the input stream
- NetworkModule networkModule = new NetworkModule(null, Settings.EMPTY, true);
- NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(networkModule.getNamedWriteables());
+ NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(NetworkModule.getNamedWriteables());
in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);
// Now we can read them!
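NetworkModule's registries are now exposed statically, so the test no longer builds a throwaway NetworkModule(null, Settings.EMPTY, true) just to read them. Both call sites in this file take the same shape:

// named writeables for deserializing allocation commands from a stream
NamedWriteableRegistry namedWriteableRegistry =
    new NamedWriteableRegistry(NetworkModule.getNamedWriteables());
in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);

// parser registry used by AllocationCommands.fromXContent (see the next hunk)
AllocationCommandRegistry registry = NetworkModule.getAllocationCommandRegistry();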
@@ -489,7 +492,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
// move two tokens, parser expected to be "on" `commands` field
parser.nextToken();
parser.nextToken();
- AllocationCommandRegistry registry = new NetworkModule(null, Settings.EMPTY, true).getAllocationCommandRegistry();
+ AllocationCommandRegistry registry = NetworkModule.getAllocationCommandRegistry();
AllocationCommands sCommands = AllocationCommands.fromXContent(parser, ParseFieldMatcher.STRICT, registry);
assertThat(sCommands.commands().size(), equalTo(5));
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java
index 9b5b8db7ac..684985801c 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java
@@ -20,13 +20,13 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
@@ -61,36 +61,31 @@ public class AllocationPriorityTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("first").settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_PRIORITY, priorityFirst)).numberOfShards(2).numberOfReplicas(1))
.put(IndexMetaData.builder("second").settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_PRIORITY, prioritySecond)).numberOfShards(2).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("first"))
.addAsNew(metaData.index("second"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
- routingTable = allocation.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertEquals(2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size());
assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName());
assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName());
- routingTable = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertEquals(2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size());
assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName());
assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName());
- routingTable = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertEquals(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).toString(), 2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size());
assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName());
assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName());
- routingTable = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertEquals(2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size());
assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName());
assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName());
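AllocationPriorityTests leans on index.priority (IndexMetaData.SETTING_PRIORITY): with recoveries throttled by the ThrottlingAllocationDecider, each start/reroute round is expected to surface the higher-priority index's shards before the lower-priority one's. A sketch of the setup and the assertion it feeds, with priorityFirst and prioritySecond being the test's randomized values:

MetaData metaData = MetaData.builder()
    .put(IndexMetaData.builder("first")
        .settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_PRIORITY, priorityFirst))
        .numberOfShards(2).numberOfReplicas(1))
    .put(IndexMetaData.builder("second")
        .settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_PRIORITY, prioritySecond))
        .numberOfShards(2).numberOfReplicas(1))
    .build();
// the higher-priority index initializes first in each round
assertEquals(highPriorityName,
    clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName());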
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
index 13ade5265a..e7eacf94f9 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -31,10 +33,8 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
@@ -49,7 +49,7 @@ import static org.hamcrest.Matchers.sameInstance;
*/
public class AwarenessAllocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
+ private final Logger logger = Loggers.getLogger(AwarenessAllocationTests.class);
public void testMoveShardOnceNewNodeWithAttributeAdded1() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -64,28 +64,23 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaData.index("test"))
- .build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
@@ -93,16 +88,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
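These awareness tests rely on the allocation service being configured with an awareness attribute; the settings are elided in this hunk, but given the node attributes the configuration is presumably cluster.routing.allocation.awareness.attributes set to rack_id. Nodes carry the attribute via singletonMap, and adding a node on a new rack triggers a relocation to spread copies across racks. A sketch under that assumption:

// assumed awareness configuration, inferred from the rack_id node attributes
AllocationService strategy = createAllocationService(Settings.builder()
    .put("cluster.routing.allocation.awareness.attributes", "rack_id")
    .build());
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
    .add(newNode("node3", singletonMap("rack_id", "2")))   // second rack attracts a shard copy
).build();
clusterState = strategy.reroute(clusterState, "reroute");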
@@ -113,9 +106,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(routingTable, sameInstance(clusterState.routingTable()));
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
}
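With no routing-table handle to compare by identity, the no-change check becomes state equality: a reroute that moves nothing returns a ClusterState equal to its input. As used here, where a node on a third rack adds no placement the deciders want:

ClusterState newState = strategy.reroute(clusterState, "reroute");
// nothing relocated: the returned state equals the input state
assertThat(newState, equalTo(clusterState));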
@@ -132,11 +124,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaData.index("test"))
- .build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -144,17 +134,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.add(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node3", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
@@ -162,16 +149,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
@@ -182,9 +167,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node5", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(routingTable, sameInstance(clusterState.routingTable()));
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
}
@@ -206,19 +190,18 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Initializing shards: {}", clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("Started shards: {}", clusterState.getRoutingNodes().shardsWithState(STARTED));
@@ -228,12 +211,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
@@ -241,8 +222,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(5));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(5));
@@ -250,12 +230,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
logger.info("--> complete initializing");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> run it again, since we still might have relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
@@ -266,13 +244,11 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
@@ -296,29 +272,26 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
@@ -326,8 +299,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(10));
@@ -337,11 +309,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> complete initializing");
for (int i = 0; i < 2; i++) {
logger.info("--> complete initializing round: [{}]", i);
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- }
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+ }
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(10));
@@ -355,16 +325,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
logger.info("--> complete relocation");
for (int i = 0; i < 2; i++) {
logger.info("--> complete initializing round: [{}]", i);
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- }
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+ }
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(5));
assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(5));
@@ -388,28 +356,25 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
@@ -417,16 +382,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
@@ -437,15 +400,13 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
@@ -466,11 +427,11 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(3))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -479,17 +440,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.add(newNode("node3", singletonMap("rack_id", "1")))
.add(newNode("node4", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
@@ -497,16 +455,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node5", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node5"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
@@ -517,15 +473,13 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node6", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(3));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node6"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
@@ -547,24 +501,22 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> replica will not start because we have only one rack value");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
@@ -574,16 +526,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
@@ -594,9 +544,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(routingTable, sameInstance(clusterState.routingTable()));
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
}
@@ -614,11 +563,11 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -626,13 +575,11 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.add(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node3", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> replica will not start because we have only one rack value");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
@@ -642,16 +589,14 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node4"));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
@@ -662,9 +607,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node5", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(routingTable, sameInstance(clusterState.routingTable()));
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
}
@@ -688,25 +632,23 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
@@ -714,20 +656,17 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
logger.info("--> complete initializing");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> run it again, since we still might have relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
@@ -738,13 +677,11 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
logger.info("--> complete relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
@@ -768,30 +705,27 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes in different zones and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("A-0", singletonMap("zone", "a")))
.add(newNode("B-0", singletonMap("zone", "b")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(5));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> all replicas are allocated and started since we have on node in each zone");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(10));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
@@ -800,15 +734,13 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("A-1", singletonMap("zone", "a")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(8));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("A-1"));
logger.info("--> starting initializing shards on the new node");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
assertThat(clusterState.getRoutingNodes().node("A-1").size(), equalTo(2));
@@ -829,11 +761,11 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(4))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding 5 nodes in different zones and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -844,14 +776,12 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
.add(newNode("A-4", singletonMap("zone", "a")))
.add(newNode("B-0", singletonMap("zone", "b")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shard (primary)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(3));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); // Unassigned shard is expected.
@@ -859,7 +789,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
// Cancel all initializing shards and move started primary to another node.
AllocationCommands commands = new AllocationCommands();
String primaryNode = null;
- for (ShardRouting routing : routingTable.allShards()) {
+ for (ShardRouting routing : clusterState.routingTable().allShards()) {
if (routing.primary()) {
primaryNode = routing.currentNodeId();
} else if (routing.initializing()) {
@@ -868,8 +798,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
}
commands.add(new MoveAllocationCommand("test", 0, primaryNode, "A-4"));
- routingTable = strategy.reroute(clusterState, commands, false, false).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, commands, false, false).getClusterState();
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(1));
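The AwarenessAllocationTests hunks above all apply the same mechanical migration: AllocationService.reroute and AllocationService.applyStartedShards now return the updated ClusterState directly, so the old pattern of unwrapping a RoutingAllocation.Result and rebuilding the state by hand disappears. A minimal before/after sketch of the pattern, using the fields from the tests above:

    // Before: unwrap the routing table and rebuild the cluster state by hand.
    RoutingTable routingTable = strategy.reroute(clusterState, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    // After: the service hands back the updated ClusterState itself.
    clusterState = strategy.reroute(clusterState, "reroute");

    // The AllocationCommands overload still returns a result wrapper, hence
    // the explicit getClusterState() call in the MoveAllocationCommand hunk.
    clusterState = strategy.reroute(clusterState, commands, false, false).getClusterState();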
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
index 12b37a3215..e2a360b93f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -20,9 +20,11 @@
package org.elasticsearch.cluster.routing.allocation;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -36,15 +38,15 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import org.hamcrest.Matchers;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
@@ -52,7 +54,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
public class BalanceConfigurationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(BalanceConfigurationTests.class);
+ private final Logger logger = Loggers.getLogger(BalanceConfigurationTests.class);
// TODO maybe we can randomize these numbers somehow
final int numberOfNodes = 25;
final int numberOfIndices = 12;
@@ -71,7 +73,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance);
settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold);
- AllocationService strategy = createAllocationService(settings.build());
+ AllocationService strategy = createAllocationService(settings.build(), new NoopGatewayAllocator());
ClusterState clusterState = initCluster(strategy);
assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
@@ -81,7 +83,6 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
clusterState = removeNodes(clusterState, strategy);
assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
-
}
public void testReplicaBalance() {
@@ -96,7 +97,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance);
settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold);
- AllocationService strategy = createAllocationService(settings.build());
+ AllocationService strategy = createAllocationService(settings.build(), new NoopGatewayAllocator());
ClusterState clusterState = initCluster(strategy);
assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
@@ -124,7 +125,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
routingTableBuilder.addAsNew(cursor.value);
}
- RoutingTable routingTable = routingTableBuilder.build();
+ RoutingTable initialRoutingTable = routingTableBuilder.build();
logger.info("start " + numberOfNodes + " nodes");
@@ -132,35 +133,19 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
for (int i = 0; i < numberOfNodes; i++) {
nodes.add(newNode("node" + i));
}
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(initialRoutingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("restart all the primary shards, replicas will start initializing");
- routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("start the replica shards");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("complete rebalancing");
- RoutingTable prev = routingTable;
- while (true) {
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- if (routingTable == prev)
- break;
- prev = routingTable;
- }
-
- return clusterState;
+ return applyStartedShardsUntilNoChange(clusterState, strategy);
}
private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
@@ -171,21 +156,9 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
RoutingTable routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
// move initializing to started
-
- RoutingTable prev = routingTable;
- while (true) {
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- if (routingTable == prev)
- break;
- prev = routingTable;
- }
-
- return clusterState;
+ return applyStartedShardsUntilNoChange(clusterState, strategy);
}
private ClusterState removeNodes(ClusterState clusterState, AllocationService strategy) {
@@ -200,43 +173,26 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
if (removed) {
- clusterState = ClusterState.builder(clusterState).routingResult(
- strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes")
- ).build();
+ clusterState = strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes");
}
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
logger.info("start all the primary shards, replicas will start initializing");
- RoutingTable routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("start the replica shards");
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("rebalancing");
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("complete rebalancing");
- RoutingTable prev = routingTable;
- while (true) {
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
- if (routingTable == prev)
- break;
- prev = routingTable;
- }
-
- return clusterState;
+ return applyStartedShardsUntilNoChange(clusterState, strategy);
}
- private void assertReplicaBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) {
+ private void assertReplicaBalance(Logger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) {
final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1);
final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold)));
@@ -300,7 +256,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
Settings.Builder settings = Settings.builder();
AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random()),
- NoopGatewayAllocator.INSTANCE, new ShardsAllocator() {
+ new TestGatewayAllocator(), new ShardsAllocator() {
public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
return new HashMap<DiscoveryNode, Float>();
@@ -397,7 +353,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.INITIALIZING));
}
}
- strategy = createAllocationService(settings.build());
+ strategy = createAllocationService(settings.build(), new NoopGatewayAllocator());
logger.info("use the new allocator and check if it moves shards");
routingNodes = clusterState.getRoutingNodes();
@@ -431,7 +387,26 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
}
}
-
}
+ private class NoopGatewayAllocator extends GatewayAllocator {
+
+ public NoopGatewayAllocator() {
+ super(Settings.EMPTY, null, null);
+ }
+
+ @Override
+ public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
+ // noop
+ }
+
+ @Override
+ public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
+ // noop
+ }
+ @Override
+ public void allocateUnassigned(RoutingAllocation allocation) {
+ // noop
+ }
+ }
}
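Two changes run through the BalanceConfigurationTests diff: the hand-rolled "apply started shards until the routing table stops changing" loops collapse into a single applyStartedShardsUntilNoChange helper, and the deleted org.elasticsearch.test.gateway.NoopGatewayAllocator is replaced by either TestGatewayAllocator or the local no-op GatewayAllocator subclass added at the bottom of the file. The helper's body is not part of this diff; a plausible reconstruction, assuming it mirrors the fixed-point loops it replaces:

    // Hypothetical sketch of the ESAllocationTestCase helper; the real
    // implementation may differ in detail.
    protected ClusterState applyStartedShardsUntilNoChange(ClusterState clusterState, AllocationService service) {
        ClusterState lastClusterState;
        do {
            lastClusterState = clusterState;
            clusterState = service.applyStartedShards(clusterState,
                    clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING));
        } while (lastClusterState.equals(clusterState) == false);
        return clusterState;
    }

The equality-based termination is consistent with the new assertThat(newState, equalTo(clusterState)) assertions above: when nothing changes, the service returns a state equal to its input.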
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java
index bcefe46884..254ba81f93 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java
@@ -60,22 +60,20 @@ public class BalanceUnbalancedClusterTests extends CatAllocationTestCase {
.put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder(state.routingTable())
+ RoutingTable initialRoutingTable = RoutingTable.builder(state.routingTable())
.addAsNew(metaData.index(index))
.build();
- ClusterState clusterState = ClusterState.builder(state).metaData(metaData).routingTable(routingTable).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(state).metaData(metaData).routingTable(initialRoutingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
while (true) {
- if (routingTable.shardsWithState(INITIALIZING).isEmpty()) {
+ if (clusterState.routingTable().shardsWithState(INITIALIZING).isEmpty()) {
break;
}
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
}
Map<String, Integer> counts = new HashMap<>();
- for (IndexShardRoutingTable table : routingTable.index(index)) {
+ for (IndexShardRoutingTable table : clusterState.routingTable().index(index)) {
for (ShardRouting r : table) {
String s = r.currentNodeId();
Integer count = counts.get(s);
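BalanceUnbalancedClusterTests shows the loop-driving side of the same migration: rather than threading a routingTable local through the loop (and risking it going stale), each iteration re-reads the table from the current ClusterState. The idiom, extracted from the hunk above:

    // Start initializing shards until none remain, always consulting the
    // routing table of the current cluster state.
    while (clusterState.routingTable().shardsWithState(INITIALIZING).isEmpty() == false) {
        clusterState = strategy.applyStartedShards(clusterState,
                clusterState.routingTable().shardsWithState(INITIALIZING));
    }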
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java
index 8640868bd2..db707c5478 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.io.BufferedReader;
import java.io.IOException;
@@ -66,7 +66,8 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
try (BufferedReader reader = Files.newBufferedReader(getCatPath(), StandardCharsets.UTF_8)) {
String line = null;
// regexp FTW
- Pattern pattern = Pattern.compile("^(.+)\\s+(\\d)\\s+([rp])\\s+(STARTED|RELOCATING|INITIALIZING|UNASSIGNED)\\s+\\d+\\s+[0-9.a-z]+\\s+(\\d+\\.\\d+\\.\\d+\\.\\d+).*$");
+ Pattern pattern = Pattern.compile("^(.+)\\s+(\\d)\\s+([rp])\\s+(STARTED|RELOCATING|INITIALIZING|UNASSIGNED)" +
+ "\\s+\\d+\\s+[0-9.a-z]+\\s+(\\d+\\.\\d+\\.\\d+\\.\\d+).*$");
while((line = reader.readLine()) != null) {
final Matcher matcher;
if ((matcher = pattern.matcher(line)).matches()) {
@@ -81,7 +82,7 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
ShardRoutingState state = ShardRoutingState.valueOf(matcher.group(4));
String ip = matcher.group(5);
nodes.add(ip);
- ShardRouting routing = TestShardRouting.newShardRouting(index, shard, ip, null, null, primary, state);
+ ShardRouting routing = TestShardRouting.newShardRouting(index, shard, ip, null, primary, state);
idx.add(routing);
logger.debug("Add routing {}", routing);
} else {
@@ -95,7 +96,21 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
MetaData.Builder builder = MetaData.builder();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
for(Idx idx : indices.values()) {
- IndexMetaData idxMeta = IndexMetaData.builder(idx.name).settings(settings(Version.CURRENT)).numberOfShards(idx.numShards()).numberOfReplicas(idx.numReplicas()).build();
+ IndexMetaData.Builder idxMetaBuilder = IndexMetaData.builder(idx.name).settings(settings(Version.CURRENT))
+ .numberOfShards(idx.numShards()).numberOfReplicas(idx.numReplicas());
+ for (ShardRouting shardRouting : idx.routing) {
+ if (shardRouting.active()) {
+ Set<String> allocationIds = idxMetaBuilder.getInSyncAllocationIds(shardRouting.id());
+ if (allocationIds == null) {
+ allocationIds = new HashSet<>();
+ } else {
+ allocationIds = new HashSet<>(allocationIds);
+ }
+ allocationIds.add(shardRouting.allocationId().getId());
+ idxMetaBuilder.putInSyncAllocationIds(shardRouting.id(), allocationIds);
+ }
+ }
+ IndexMetaData idxMeta = idxMetaBuilder.build();
builder.put(idxMeta, false);
IndexRoutingTable.Builder tableBuilder = new IndexRoutingTable.Builder(idxMeta.getIndex()).initializeAsRecovery(idxMeta);
Map<Integer, IndexShardRoutingTable> shardIdToRouting = new HashMap<>();
@@ -105,7 +120,6 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
refData = new IndexShardRoutingTable.Builder(shardIdToRouting.get(r.getId())).addShard(r).build();
}
shardIdToRouting.put(r.getId(), refData);
-
}
for (IndexShardRoutingTable t: shardIdToRouting.values()) {
tableBuilder.addIndexShard(t);
@@ -120,7 +134,8 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
for (String node : nodes) {
builderDiscoNodes.add(newNode(node));
}
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).nodes(builderDiscoNodes.build()).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+ .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).nodes(builderDiscoNodes.build()).build();
if (balanceFirst()) {
clusterState = rebalance(clusterState);
}
@@ -134,22 +149,18 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
}
private ClusterState rebalance(ClusterState clusterState) {
- RoutingTable routingTable;AllocationService strategy = createAllocationService(Settings.builder()
+ AllocationService strategy = createAllocationService(Settings.builder()
.build());
- RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute");
- routingTable = reroute.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingTable = clusterState.routingTable();
+ clusterState = strategy.reroute(clusterState, "reroute");
int numRelocations = 0;
while (true) {
- List<ShardRouting> initializing = routingTable.shardsWithState(INITIALIZING);
+ List<ShardRouting> initializing = clusterState.routingTable().shardsWithState(INITIALIZING);
if (initializing.isEmpty()) {
break;
}
logger.debug("Initializing shards: {}", initializing);
numRelocations += initializing.size();
- routingTable = strategy.applyStartedShards(clusterState, initializing).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, initializing);
}
logger.debug("--> num relocations to get balance: {}", numRelocations);
return clusterState;
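Besides the return-type migration, CatAllocationTestCase now registers an in-sync allocation id for every active shard it parses from the cat output. A reasonable reading of this change is that the reworked allocation machinery only treats a shard copy as in-sync if its allocation id is tracked in IndexMetaData, so fixtures built from a routing snapshot must seed those ids up front; that motivation is an inference, but the builder calls themselves mirror the hunk above:

    // For each active shard, add its allocation id to the index's in-sync set.
    Set<String> ids = idxMetaBuilder.getInSyncAllocationIds(shardRouting.id());
    ids = (ids == null) ? new HashSet<>() : new HashSet<>(ids); // absent, or possibly immutable
    ids.add(shardRouting.allocationId().getId());
    idxMetaBuilder.putInSyncAllocationIds(shardRouting.id(), ids);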
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
index a4227ea7e4..8cccdb08fb 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -29,11 +31,9 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -43,9 +43,10 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class);
public void testAlways() {
AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
@@ -56,76 +57,67 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
-// assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+// assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").size(), equalTo(1));
@@ -142,95 +134,84 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ clusterState = strategy.reroute(clusterState, "reroute");
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").size(), equalTo(1));
@@ -246,76 +227,68 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
@@ -330,114 +303,102 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("start the test2 replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").size(), equalTo(1));
@@ -453,76 +414,68 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
@@ -537,95 +490,85 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
- assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test2").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
@@ -634,7 +577,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
public void testRebalanceWithIgnoredUnassignedShards() {
final AtomicBoolean allocateTest1 = new AtomicBoolean(false);
- AllocationService strategy = createAllocationService(Settings.EMPTY, new NoopGatewayAllocator() {
+ AllocationService strategy = createAllocationService(Settings.EMPTY, new TestGatewayAllocator() {
@Override
public void allocateUnassigned(RoutingAllocation allocation) {
if (allocateTest1.get() == false) {
@@ -657,31 +600,28 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.addAsNew(metaData.index("test1"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug("start all the primary shards for test");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test", INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
logger.debug("now, start 1 more node, check that rebalancing will not happen since we unassigned shards");
@@ -689,46 +629,43 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.add(newNode("node2")))
.build();
logger.debug("reroute and check that nothing has changed");
- RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute");
- assertFalse(reroute.changed());
- routingTable = reroute.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState resultingState = strategy.reroute(clusterState, "reroute");
+ assertThat(resultingState, equalTo(clusterState));
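(The two added lines above capture the refactored no-op check: where the old API exposed RoutingAllocation.Result.changed(), the new reroute returns a ClusterState, and "nothing changed" is asserted by comparing the returned state to the input. A minimal before/after sketch of the idiom, using only the calls visible in this diff:)

    // Before: reroute() returned a RoutingAllocation.Result with a changed flag,
    // and the caller rebuilt the cluster state from its routing table.
    RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute");
    assertFalse(reroute.changed());
    clusterState = ClusterState.builder(clusterState).routingTable(reroute.routingTable()).build();

    // After: reroute() returns the (possibly unchanged) ClusterState directly;
    // a no-op reroute yields a state equal to its input.
    ClusterState resultingState = strategy.reroute(clusterState, "reroute");
    assertThat(resultingState, equalTo(clusterState));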
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
logger.debug("now set allocateTest1 to true and reroute we should see the [test1] index initializing");
allocateTest1.set(true);
- reroute = strategy.reroute(clusterState, "reroute");
- assertTrue(reroute.changed());
- routingTable = reroute.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ resultingState = strategy.reroute(clusterState, "reroute");
+ assertThat(resultingState, not(equalTo(clusterState)));
+ clusterState = resultingState;
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug("now start initializing shards and expect exactly one rebalance from node1 to node 2 since index [test] is all on node1");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
}
int numStarted = 0;
int numRelocating = 0;
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- if (routingTable.index("test").shard(i).primaryShard().state() == STARTED) {
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == STARTED) {
numStarted++;
- } else if (routingTable.index("test").shard(i).primaryShard().state() == RELOCATING) {
+ } else if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == RELOCATING) {
numRelocating++;
}
}
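(The loop above tallies primary states for [test] so the test can assert, past the end of this hunk, that exactly one of the two primaries stays STARTED while exactly one goes RELOCATING to the new node. A sketch of an equivalent tally written against the iterable routing table; it assumes IndexRoutingTable's standard Iterable<IndexShardRoutingTable> contract and an import of org.elasticsearch.cluster.routing.IndexShardRoutingTable, neither of which appears in this diff:)

    int numStarted = 0;
    int numRelocating = 0;
    // Count primary states across all [test] shards; a single rebalance
    // should leave one primary STARTED and one RELOCATING.
    for (IndexShardRoutingTable shardTable : clusterState.routingTable().index("test")) {
        if (shardTable.primaryShard().state() == STARTED) {
            numStarted++;
        } else if (shardTable.primaryShard().state() == RELOCATING) {
            numRelocating++;
        }
    }
    assertEquals(1, numStarted);
    assertEquals(1, numRelocating);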
@@ -740,7 +677,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
public void testRebalanceWhileShardFetching() {
final AtomicBoolean hasFetches = new AtomicBoolean(true);
AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
- ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new NoopGatewayAllocator() {
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new TestGatewayAllocator() {
@Override
public void allocateUnassigned(RoutingAllocation allocation) {
if (hasFetches.get()) {
@@ -756,31 +693,28 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.build();
// we use a second index here (test1) that never gets assigned; otherwise allocateUnassigned is never called if we don't have unassigned shards.
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.addAsNew(metaData.index("test1"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug("start all the primary shards for test");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test", INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test", INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
logger.debug("now, start 1 more node, check that rebalancing will not happen since we have shard sync going on");
@@ -788,41 +722,38 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.add(newNode("node2")))
.build();
logger.debug("reroute and check that nothing has changed");
- RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute");
- assertFalse(reroute.changed());
- routingTable = reroute.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState resultState = strategy.reroute(clusterState, "reroute");
+ assertThat(resultState, equalTo(clusterState));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
logger.debug("now set hasFetches to true and reroute we should now see exactly one relocating shard");
hasFetches.set(false);
- reroute = strategy.reroute(clusterState, "reroute");
- assertTrue(reroute.changed());
- routingTable = reroute.routingTable();
+ resultState = strategy.reroute(clusterState, "reroute");
+ assertThat(resultState, not(equalTo(clusterState)));
+ clusterState = resultState;
int numStarted = 0;
int numRelocating = 0;
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
- if (routingTable.index("test").shard(i).primaryShard().state() == STARTED) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1));
+ if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == STARTED) {
numStarted++;
- } else if (routingTable.index("test").shard(i).primaryShard().state() == RELOCATING) {
+ } else if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == RELOCATING) {
numRelocating++;
}
}
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
assertEquals(1, numStarted);
assertEquals(1, numRelocating);
-
}
}
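(Taken together, every hunk in this file applies one mechanical migration: AllocationService.reroute and applyStartedShards now return the updated ClusterState, so callers drop the prevRoutingTable bookkeeping and the manual ClusterState.builder(...).routingTable(...).build() step, and read routing data back through clusterState.routingTable(). A condensed sketch of the pattern, using only signatures shown in this diff:)

    // Before: unwrap the RoutingTable and rebuild the ClusterState by hand.
    RoutingTable routingTable = strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    assertThat(routingTable.index("test1").shard(0).primaryShard().state(), equalTo(STARTED));

    // After: the service returns the new ClusterState; consult its routing table directly.
    clusterState = strategy.applyStartedShards(clusterState,
            routingNodes.shardsWithState("test1", INITIALIZING));
    assertThat(clusterState.routingTable().index("test1").shard(0).primaryShard().state(), equalTo(STARTED));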
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
index 6b330fa738..aa7be906a6 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
@@ -19,17 +19,17 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class);
public void testClusterConcurrentRebalance() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -53,96 +53,83 @@ public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(5));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10")))
.build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("start the replica shards, rebalancing should start, but, only 3 should be rebalancing");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
// we only allow 3 relocations at a time
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(3));
logger.info("finalize this session relocation, 3 more should relocate now");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
// we only allow 3 relocations at a time
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(3));
logger.info("finalize this session relocation, 2 more should relocate now");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
// we only allow 3 relocations at a time
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(2));
logger.info("finalize this session relocation, no more relocation");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
// we only allow 3 relocations at a time
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(0));
}
}
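ConcurrentRebalanceRoutingTests drives the cluster to quiescence by repeatedly starting whatever is INITIALIZING. A sketch of that loop as a reusable helper, assuming only the refactored applyStartedShards signature used above (the helper name is hypothetical, not part of this change):

    // Hypothetical helper: keep starting INITIALIZING shards until none remain,
    // returning the final ClusterState produced by the allocation service.
    private static ClusterState startAllInitializing(AllocationService strategy, ClusterState state) {
        List<ShardRouting> initializing = state.getRoutingNodes().shardsWithState(INITIALIZING);
        while (initializing.isEmpty() == false) {
            state = strategy.applyStartedShards(state, initializing);
            initializing = state.getRoutingNodes().shardsWithState(INITIALIZING);
        }
        return state;
    }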
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
index 914b6a5d91..9076dde19f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -28,20 +30,19 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
/**
*/
public class DeadNodesAllocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(DeadNodesAllocationTests.class);
+ private final Logger logger = Loggers.getLogger(DeadNodesAllocationTests.class);
public void testSimpleDeadNodeOnStartedPrimaryShard() {
AllocationService allocation = createAllocationService(Settings.builder()
@@ -64,15 +65,12 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
.add(newNode("node2"))
).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
// starting primaries
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> verifying all is allocated");
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
@@ -87,8 +85,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
.add(newNode(nodeIdRemaining))
).build();
- rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().primary(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().state(), equalTo(STARTED));
@@ -115,15 +112,12 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
.add(newNode("node2"))
).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
// starting primaries
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> verifying all is allocated");
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
@@ -135,8 +129,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3"))
).build();
- rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").iterator().next().state(), equalTo(STARTED));
@@ -148,11 +141,11 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
logger.info("--> moving primary shard to node3");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand("test", 0, clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")),
false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(commandsResult.getClusterState(), not(equalTo(clusterState)));
+ clusterState = commandsResult.getClusterState();
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));
assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING));
@@ -161,8 +154,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
.add(newNode(origPrimaryNodeId))
.add(newNode(origReplicaNodeId))
).build();
- rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED));
assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED));
@@ -189,15 +181,12 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
.add(newNode("node2"))
).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
// starting primaries
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> verifying all is allocated");
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
@@ -209,8 +198,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3"))
).build();
- rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").iterator().next().state(), equalTo(STARTED));
@@ -222,11 +210,11 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
logger.info("--> moving primary shard to node3");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand("test",0 , clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")),
false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(commandsResult.getClusterState(), not(equalTo(clusterState)));
+ clusterState = commandsResult.getClusterState();
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));
assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING));
@@ -235,8 +223,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
.add(newNode("node3"))
.add(newNode(origReplicaNodeId))
).build();
- rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED));
assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING));
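Command-driven reroutes are the one call that still returns a wrapper: AllocationService.CommandsResult replaces RoutingAllocation.Result, and the old rerouteResult.changed() check becomes an inequality on the returned state. A condensed sketch, assuming the two trailing booleans are explain and retryFailed as at the call sites above (fromNodeId is a placeholder):

    AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState,
        new AllocationCommands(new MoveAllocationCommand("test", 0, fromNodeId, "node3")),
        false, false); // assumed to be explain=false, retryFailed=false
    assertThat(commandsResult.getClusterState(), not(equalTo(clusterState))); // replaces changed()
    clusterState = commandsResult.getClusterState();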
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java
index bcf5be90d9..bee2275743 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterStateHealth;
@@ -38,8 +39,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.io.IOException;
import java.util.Collections;
@@ -161,7 +161,7 @@ public class DecisionsImpactOnClusterHealthTests extends ESAllocationTestCase {
private static AllocationService newAllocationService(Settings settings, Set<AllocationDecider> deciders) {
return new AllocationService(settings,
new AllocationDeciders(settings, deciders),
- NoopGatewayAllocator.INSTANCE,
+ new TestGatewayAllocator(),
new BalancedShardsAllocator(settings),
EmptyClusterInfoService.INSTANCE);
}
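Here the stateless NoopGatewayAllocator.INSTANCE gives way to a freshly constructed TestGatewayAllocator, which carries per-test state. A hedged usage sketch of the resulting service in a health-oriented test like this one, relying only on the ClusterStateHealth import already present in the file:

    // build the service via the helper above, then reroute and read back health
    AllocationService service = newAllocationService(settings, deciders);
    ClusterState rerouted = service.reroute(clusterState, "reroute");
    ClusterHealthStatus status = new ClusterStateHealth(rerouted).getStatus();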
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
index f3aa1a2652..77e83fd665 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
@@ -19,28 +19,29 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
/**
*
*/
public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class);
+ private final Logger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class);
public void testElectReplicaAsPrimaryDuringRelocation() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -51,61 +52,52 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("Start the replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ ClusterState resultingState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(resultingState, not(equalTo(clusterState)));
+ clusterState = resultingState;
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ routingNodes = clusterState.getRoutingNodes();
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
logger.info("Start another node and perform rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("find the replica shard that gets relocated");
IndexShardRoutingTable indexShardRoutingTable = null;
- if (routingTable.index("test").shard(0).replicaShards().get(0).relocating()) {
- indexShardRoutingTable = routingTable.index("test").shard(0);
- } else if (routingTable.index("test").shard(1).replicaShards().get(0).relocating()) {
- indexShardRoutingTable = routingTable.index("test").shard(1);
+ if (clusterState.routingTable().index("test").shard(0).replicaShards().get(0).relocating()) {
+ indexShardRoutingTable = clusterState.routingTable().index("test").shard(0);
+ } else if (clusterState.routingTable().index("test").shard(1).replicaShards().get(0).relocating()) {
+ indexShardRoutingTable = clusterState.routingTable().index("test").shard(1);
}
// we might have primary relocating, and the test is only for replicas, so only test in the case of replica allocation
if (indexShardRoutingTable != null) {
logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId());
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
logger.info("make sure all the primary shards are active");
- assertThat(routingTable.index("test").shard(0).primaryShard().active(), equalTo(true));
- assertThat(routingTable.index("test").shard(1).primaryShard().active(), equalTo(true));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().active(), equalTo(true));
+ assertThat(clusterState.routingTable().index("test").shard(1).primaryShard().active(), equalTo(true));
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java
index b6a934b066..873c71f19b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java
@@ -19,10 +19,12 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -32,18 +34,17 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
/**
*/
public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ExpectedShardSizeAllocationTests.class);
+ private final Logger logger = Loggers.getLogger(ExpectedShardSizeAllocationTests.class);
public void testInitializingHasExpectedSize() {
final long byteSize = randomIntBetween(0, Integer.MAX_VALUE);
@@ -81,23 +82,20 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertEquals(1, clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING));
assertEquals(byteSize, clusterState.getRoutingTable().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize());
logger.info("Start the primary shard");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertEquals(1, clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED));
assertEquals(1, clusterState.getRoutingNodes().unassigned().size());
logger.info("Add another one node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertEquals(1, clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING));
assertEquals(byteSize, clusterState.getRoutingTable().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize());
@@ -134,12 +132,10 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
logger.info("adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
logger.info("start primary shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("move the shard");
String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
@@ -149,9 +145,10 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
} else {
toNodeId = "node1";
}
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ AllocationService.CommandsResult commandsResult =
+ allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false);
+ assertThat(commandsResult.getClusterState(), not(equalTo(clusterState)));
+ clusterState = commandsResult.getClusterState();
assertEquals(ShardRoutingState.RELOCATING, clusterState.getRoutingNodes().node(existingNodeId).iterator().next().state());
assertEquals(ShardRoutingState.INITIALIZING, clusterState.getRoutingNodes().node(toNodeId).iterator().next().state());
@@ -159,8 +156,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
assertEquals(byteSize, clusterState.getRoutingNodes().node(toNodeId).iterator().next().getExpectedShardSize());
logger.info("finish moving the shard");
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node(existingNodeId).isEmpty(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
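ExpectedShardSizeAllocationTests follows the same move-then-start flow, checking that the expected shard size survives relocation. A condensed sketch using only calls visible above (existingNodeId and toNodeId are the test's own variables):

    AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState,
        new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false);
    clusterState = commandsResult.getClusterState();
    // the initializing copy on the target node should carry the expected size
    assertEquals(byteSize, clusterState.getRoutingNodes().node(toNodeId).iterator().next().getExpectedShardSize());
    // finish the move by starting the initializing shard
    clusterState = allocation.applyStartedShards(clusterState,
        clusterState.getRoutingNodes().shardsWithState(INITIALIZING));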
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
index 2dae0c6c2d..61a28897d5 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -28,17 +30,15 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.hamcrest.Matchers.equalTo;
public class FailedNodeRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(FailedNodeRoutingTests.class);
public void testSimpleFailedNodeTest() {
AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
@@ -49,31 +49,25 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("start 4 nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
logger.info("start the replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
@@ -85,13 +79,11 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase {
logger.info("remove 2 nodes where primaries are allocated, reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
- .remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
- .remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
+ .remove(clusterState.routingTable().index("test1").shard(0).primaryShard().currentNodeId())
+ .remove(clusterState.routingTable().index("test2").shard(0).primaryShard().currentNodeId())
)
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
routingNodes = clusterState.getRoutingNodes();
for (RoutingNode routingNode : routingNodes) {
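The node-failure tests all share one pattern: remove a node from DiscoveryNodes, then let the service reassign its shards. A minimal sketch, assuming deassociateDeadNodes keeps the signature used in these hunks (deadNodeId is a placeholder):

    // drop the node from the cluster state, then reassign its shards
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(deadNodeId))
        .build();
    clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");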
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
index e66e35635e..667ae850bf 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -30,16 +32,16 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.index.shard.ShardId;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
+import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -54,7 +56,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class FailedShardsRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(FailedShardsRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(FailedShardsRoutingTests.class);
public void testFailedShardPrimaryRelocatingToAndFrom() {
AllocationService allocation = createAllocationService(Settings.builder()
@@ -69,7 +71,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -77,15 +79,12 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.add(newNode("node2"))
).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
// starting primaries
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// starting replicas
- rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> verifying all is allocated");
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
@@ -97,8 +96,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3"))
).build();
- rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").iterator().next().state(), equalTo(STARTED));
@@ -110,33 +108,31 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
logger.info("--> moving primary shard to node3");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand("test", 0, clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")),
false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(commandsResult.getClusterState(), not(equalTo(clusterState)));
+ clusterState = commandsResult.getClusterState();
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));
assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING));
logger.info("--> fail primary shard recovering instance on node3 being initialized");
- rerouteResult = allocation.applyFailedShard(clusterState, clusterState.getRoutingNodes().node("node3").iterator().next());
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyFailedShard(clusterState, clusterState.getRoutingNodes().node("node3").iterator().next());
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0));
logger.info("--> moving primary shard to node3");
- rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ commandsResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand("test", 0, clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")),
false, false);
- assertThat(rerouteResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(commandsResult.getClusterState(), not(equalTo(clusterState)));
+ clusterState = commandsResult.getClusterState();
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING));
assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING));
logger.info("--> fail primary shard recovering instance on node1 being relocated");
- rerouteResult = allocation.applyFailedShard(clusterState, clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next());
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = allocation.applyFailedShard(clusterState, clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next());
// check promotion of replica to primary
assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED));
@@ -156,70 +152,65 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the shards (primaries)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+
+ ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Start the shards (backups)");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("fail the primary shard, will have no place to be rerouted to (single node), so stays unassigned");
- ShardRouting shardToFail = routingTable.index("test").shard(0).primaryShard();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyFailedShard(clusterState, shardToFail).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), not(equalTo(shardToFail.currentNodeId())));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ ShardRouting shardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
+ newState = strategy.applyFailedShard(clusterState, shardToFail);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), not(equalTo(shardToFail.currentNodeId())));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
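Every hunk in this file follows the same migration: AllocationService#reroute, #applyStartedShards and #applyFailedShard now return the resulting ClusterState directly instead of a RoutingAllocation.Result, so the tests no longer rebuild the state from a routing table by hand. A minimal sketch of the new calling convention, assuming it runs inside an ESAllocationTestCase with the helpers (createAllocationService, newNode) and the static imports used throughout this patch:

    AllocationService strategy = createAllocationService(Settings.EMPTY);
    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
        .build();
    RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
    ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData).routingTable(initialRoutingTable).build();
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();

    // reroute returns a new ClusterState when the allocation changed and, by contract,
    // the same instance when it did not -- which is what not(equalTo(clusterState)) verifies
    ClusterState newState = strategy.reroute(clusterState, "reroute");
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;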
public void testFirstAllocationFailureSingleNode() {
@@ -234,44 +225,42 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding single node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("fail the first shard, will have no place to be rerouted to (single node), so stays unassigned");
- prevRoutingTable = routingTable;
ShardRouting firstShard = clusterState.getRoutingNodes().node("node1").iterator().next();
- routingTable = strategy.applyFailedShard(clusterState, firstShard).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ newState = strategy.applyFailedShard(clusterState, firstShard);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
}
@@ -287,11 +276,11 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(numberOfReplicas))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1);
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder();
@@ -301,17 +290,13 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) {
// start all initializing
- clusterState = ClusterState.builder(clusterState)
- .routingTable(strategy
- .applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)).routingTable()
- )
- .build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
// and assign more unassigned
- clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
}
int shardsToFail = randomIntBetween(1, numberOfReplicas);
- ArrayList<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>();
+ ArrayList<FailedShard> failedShards = new ArrayList<>();
RoutingNodes routingNodes = clusterState.getRoutingNodes();
Set<String> failedNodes = new HashSet<>();
Set<ShardRouting> shardRoutingsToFail = new HashSet<>();
@@ -320,18 +305,17 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
logger.info("failing shard on node [{}]", failedNode);
ShardRouting shardToFail = routingNodes.node(failedNode).iterator().next();
if (shardRoutingsToFail.contains(shardToFail) == false) {
- failedShards.add(new FailedRerouteAllocation.FailedShard(shardToFail, null, null));
+ failedShards.add(new FailedShard(shardToFail, null, null));
failedNodes.add(failedNode);
shardRoutingsToFail.add(shardToFail);
}
}
- routingTable = strategy.applyFailedShards(clusterState, failedShards).routingTable();
-
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyFailedShards(clusterState, failedShards);
routingNodes = clusterState.getRoutingNodes();
- for (FailedRerouteAllocation.FailedShard failedShard : failedShards) {
- if (routingNodes.getByAllocationId(failedShard.routingEntry.shardId(), failedShard.routingEntry.allocationId().getId()) != null) {
+ for (FailedShard failedShard : failedShards) {
+ if (routingNodes.getByAllocationId(failedShard.getRoutingEntry().shardId(),
+ failedShard.getRoutingEntry().allocationId().getId()) != null) {
fail("shard " + failedShard + " was not failed");
}
}
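Batch failures go through the new top-level FailedShard value class (formerly FailedRerouteAllocation.FailedShard), whose fields are now read through getters. A sketch of failing one shard per node and checking that the failed copies are gone, assuming a fully started clusterState as above:

    List<FailedShard> failedShards = new ArrayList<>();
    for (String node : Arrays.asList("node1", "node2")) {
        ShardRouting shardToFail = clusterState.getRoutingNodes().node(node).iterator().next();
        failedShards.add(new FailedShard(shardToFail, null, null)); // message and cause may be null in tests
    }
    clusterState = strategy.applyFailedShards(clusterState, failedShards);
    for (FailedShard failedShard : failedShards) {
        ShardRouting routing = failedShard.getRoutingEntry();
        // the failed allocation id must no longer be present on any node
        assertNull(clusterState.getRoutingNodes().getByAllocationId(routing.shardId(), routing.allocationId().getId()));
    }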
@@ -355,48 +339,46 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ final String nodeHoldingPrimary = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("fail the first shard, will start INITIALIZING on the second node");
- prevRoutingTable = routingTable;
final ShardRouting firstShard = clusterState.getRoutingNodes().node(nodeHoldingPrimary).iterator().next();
- routingTable = strategy.applyFailedShard(clusterState, firstShard).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
+ newState = strategy.applyFailedShard(clusterState, firstShard);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
- final String nodeHoldingPrimary2 = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+ final String nodeHoldingPrimary2 = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
assertThat(nodeHoldingPrimary2, not(equalTo(nodeHoldingPrimary)));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), not(equalTo(nodeHoldingPrimary)));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), not(equalTo(nodeHoldingPrimary)));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
}
@@ -412,63 +394,58 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the shards (primaries)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(2));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Start the shards (backups)");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(2));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Adding third node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
@@ -478,14 +455,12 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
logger.info("Fail the shards on node 3");
ShardRouting shardToFail = routingNodes.node("node3").iterator().next();
- routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyFailedShard(clusterState, shardToFail).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyFailedShard(clusterState, shardToFail);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
@@ -507,7 +482,10 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+ .metaData(metaData).routingTable(routingTable).build();
+
+ ShardId shardId = new ShardId(metaData.index("test").getIndex(), 0);
// add 4 nodes
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
@@ -515,26 +493,30 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));
// start primary shards
- clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
- // fail the primary shard, check replicas get removed as well...
- ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
- RoutingAllocation.Result routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail);
- assertThat(routingResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(routingResult.routingTable()).build();
- // the primary gets allocated on another node, replicas are unassigned
+ // start one replica so it can take over.
+ clusterState = allocation.applyStartedShards(clusterState,
+ Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0)));
+ assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));
+ ShardRouting startedReplica = clusterState.getRoutingNodes().activeReplica(shardId);
- ShardRouting newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard();
- assertThat(newPrimaryShard, not(equalTo(primaryShardToFail)));
- // start the primary shard
- clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable()).build();
+ // fail the primary shard, check replicas get removed as well...
+ ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
+ ClusterState newState = allocation.applyFailedShard(clusterState, primaryShardToFail);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ // the primary gets allocated on another node, replicas are initializing
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ ShardRouting newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard();
+ assertThat(newPrimaryShard, not(equalTo(primaryShardToFail)));
+ assertThat(newPrimaryShard.allocationId(), equalTo(startedReplica.allocationId()));
}
public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToElect() {
@@ -549,28 +531,28 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
// add 4 nodes
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
- clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));
// start primary shards
- clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
// start another replica shard, while keep one initializing
- clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))).routingTable()).build();
+ clusterState = allocation.applyStartedShards(clusterState, Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0)));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
// fail the primary shard, check one replica gets elected to primary, others become INITIALIZING (from it)
ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
- RoutingAllocation.Result routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail);
- assertThat(routingResult.changed(), equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(routingResult.routingTable()).build();
+ ClusterState newState = allocation.applyFailedShard(clusterState, primaryShardToFail);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
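The primary-failure tests above all verify the same promotion invariant: failing an active primary elects a started replica in place, so the new primary carries the old replica's allocation id rather than a fresh one. A condensed sketch, assuming a shardId and a started replica captured as in the test above:

    ShardRouting startedReplica = clusterState.getRoutingNodes().activeReplica(shardId);
    ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
    clusterState = allocation.applyFailedShard(clusterState, primaryShardToFail);

    ShardRouting newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard();
    assertThat(newPrimaryShard, not(equalTo(primaryShardToFail)));
    // promotion, not reallocation: the elected primary keeps the replica's allocation id
    assertThat(newPrimaryShard.allocationId(), equalTo(startedReplica.allocationId()));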
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java
index a1f5f92e0c..3a792ae991 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java
@@ -20,35 +20,45 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
+import java.util.Arrays;
import java.util.Collections;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_NAME;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_UUID;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
public class FilterAllocationDeciderTests extends ESAllocationTestCase {
- public void testFilterInitialAllocation() {
+ public void testFilterInitialRecovery() {
FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY,
- Collections.singleton(filterAllocationDecider));
+ Arrays.asList(filterAllocationDecider,
+ new SameShardAllocationDecider(Settings.EMPTY), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY)));
AllocationService service = new AllocationService(Settings.builder().build(), allocationDeciders,
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
ClusterState state = createInitialClusterState(service, Settings.builder().put("index.routing.allocation.initial_recovery._id",
"node2").build());
RoutingTable routingTable = state.routingTable();
@@ -61,65 +71,106 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
assertNull(routingTable.index("idx").shard(0).shards().get(0).currentNodeId());
+ // after failing the shard we are unassigned since the node is blacklisted and we can't initialize on the other node
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
null, 0, false);
- assertEquals(filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0),
+ assertEquals(filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(),
state.getRoutingNodes().node("node2")
- ,allocation), Decision.YES);
- assertEquals(filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0),
+ , allocation), Decision.YES);
+ assertEquals(filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(),
state.getRoutingNodes().node("node1")
- ,allocation), Decision.NO);
+ , allocation), Decision.NO);
- // after failing the shard we are unassigned since the node is blacklisted and we can't initialize on the other node
- state = stateFromResult(state, service.reroute(state, "try allocate again"));
+ state = service.reroute(state, "try allocate again");
routingTable = state.routingTable();
- assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
- assertEquals(routingTable.index("idx").shard(0).shards().get(0).currentNodeId(), "node2");
+ assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
+ assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node2");
- state = stateFromResult(state, service.applyStartedShards(state, routingTable.index("idx").shard(0).shards()));
+ state = service.applyStartedShards(state, routingTable.index("idx").shard(0).shardsWithState(INITIALIZING));
routingTable = state.routingTable();
// ok now we are started and can be allocated anywhere!! lets see...
- assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), STARTED);
- assertEquals(routingTable.index("idx").shard(0).shards().get(0).currentNodeId(), "node2");
- assertTrue(routingTable.index("idx").shard(0).shards().get(0).allocatedPostIndexCreate(state.getMetaData().index("idx")));
+ // first create another copy
+ assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), INITIALIZING);
+ assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
+ state = service.applyStartedShards(state, routingTable.index("idx").shard(0).replicaShardsWithState(INITIALIZING));
+ routingTable = state.routingTable();
+ assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), STARTED);
+ assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
+
+ // now remove the node of the other copy and fail the current
+ DiscoveryNode node1 = state.nodes().resolveNode("node1");
+ state = service.deassociateDeadNodes(
+ ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).remove("node1")).build(),
+ true, "test");
+ state = service.applyFailedShard(state, routingTable.index("idx").shard(0).primaryShard());
- // we fail it again to check if we are initializing immediately on the other node
- state = stateFromResult(state, service.applyFailedShard(state, routingTable.index("idx").shard(0).shards().get(0)));
+ // now bring back node1 and see it's assigned
+ state = service.reroute(
+ ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).add(node1)).build(), "test");
routingTable = state.routingTable();
- assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
- assertEquals(routingTable.index("idx").shard(0).shards().get(0).currentNodeId(), "node1");
- assertTrue(routingTable.index("idx").shard(0).shards().get(0).allocatedPostIndexCreate(state.getMetaData().index("idx")));
+ assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
+ assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node1");
allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
null, 0, false);
assertEquals(filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0),
state.getRoutingNodes().node("node2")
- ,allocation), Decision.YES);
+ , allocation), Decision.YES);
assertEquals(filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0),
state.getRoutingNodes().node("node1")
- ,allocation), Decision.YES);
- }
-
- private ClusterState stateFromResult(ClusterState previousState, RoutingAllocation.Result result) {
- return ClusterState.builder(previousState).routingTable(result.routingTable()).metaData(result.metaData()).build();
+ , allocation), Decision.YES);
}
private ClusterState createInitialClusterState(AllocationService service, Settings settings) {
- MetaData.Builder metaBuilder = MetaData.builder();
- metaBuilder.put(IndexMetaData.builder("idx").settings(settings(Version.CURRENT).put(settings))
- .numberOfShards(1).numberOfReplicas(0));
- MetaData metaData = metaBuilder.build();
+ RecoverySource.Type recoveryType = randomFrom(RecoverySource.Type.EMPTY_STORE,
+ RecoverySource.Type.LOCAL_SHARDS, RecoverySource.Type.SNAPSHOT);
+ MetaData.Builder metaData = MetaData.builder();
+ final Settings.Builder indexSettings = settings(Version.CURRENT).put(settings);
+ final IndexMetaData sourceIndex;
+ if (recoveryType == RecoverySource.Type.LOCAL_SHARDS) {
+ //put a fake closed source index
+ sourceIndex = IndexMetaData.builder("sourceIndex")
+ .settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)
+ .putInSyncAllocationIds(0, Collections.singleton("aid0"))
+ .putInSyncAllocationIds(1, Collections.singleton("aid1"))
+ .build();
+ metaData.put(sourceIndex, false);
+ indexSettings.put(INDEX_SHRINK_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID());
+ indexSettings.put(INDEX_SHRINK_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName());
+ } else {
+ sourceIndex = null;
+ }
+ final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder("idx").settings(indexSettings)
+ .numberOfShards(1).numberOfReplicas(1);
+ if (recoveryType == RecoverySource.Type.SNAPSHOT) {
+ indexMetaDataBuilder.putInSyncAllocationIds(0, Collections.singleton("_snapshot_restore"));
+ }
+ final IndexMetaData indexMetaData = indexMetaDataBuilder.build();
+ metaData.put(indexMetaData, false);
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
- routingTableBuilder.addAsNew(metaData.index("idx"));
+ switch (recoveryType) {
+ case EMPTY_STORE:
+ routingTableBuilder.addAsNew(indexMetaData);
+ break;
+ case SNAPSHOT:
+ routingTableBuilder.addAsRestore(indexMetaData, new RecoverySource.SnapshotRecoverySource(
+ new Snapshot("repository", new SnapshotId("snapshot_name", "snapshot_uuid")),
+ Version.CURRENT, indexMetaData.getIndex().getName()));
+ break;
+ case LOCAL_SHARDS:
+ routingTableBuilder.addAsFromCloseToOpen(sourceIndex);
+ routingTableBuilder.addAsNew(indexMetaData);
+ break;
+ default:
+ throw new UnsupportedOperationException(recoveryType + " is not supported");
+ }
RoutingTable routingTable = routingTableBuilder.build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
- routingTable = service.reroute(clusterState, "reroute", false).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- return clusterState;
+ return service.reroute(clusterState, "reroute", false);
}
}
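testFilterInitialRecovery exercises index.routing.allocation.initial_recovery._id: the filter only constrains where the first copy of a shard may recover; once a copy exists, reallocation is unrestricted. A sketch of the decider check, reusing the test's createInitialClusterState and assuming the service and allocationDeciders set up as above:

    FilterAllocationDecider decider = new FilterAllocationDecider(Settings.EMPTY,
        new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
    ClusterState state = createInitialClusterState(service,
        Settings.builder().put("index.routing.allocation.initial_recovery._id", "node2").build());

    ShardRouting primary = state.routingTable().index("idx").shard(0).primaryShard();
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, 0, false);
    // the unassigned primary may only start its initial recovery on node2
    assertEquals(Decision.YES, decider.canAllocate(primary, state.getRoutingNodes().node("node2"), allocation));
    assertEquals(Decision.NO, decider.canAllocate(primary, state.getRoutingNodes().node("node1"), allocation));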
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
index 84b491dfa7..430809e672 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -28,10 +30,8 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.hamcrest.Matchers;
import java.util.List;
@@ -44,7 +44,7 @@ import static org.hamcrest.Matchers.equalTo;
/**
*/
public class FilterRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(FilterRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(FilterRoutingTests.class);
public void testClusterFilters() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -58,11 +58,11 @@ public class FilterRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding four nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -71,17 +71,14 @@ public class FilterRoutingTests extends ESAllocationTestCase {
.add(newNode("node3", singletonMap("tag1", "value3")))
.add(newNode("node4", singletonMap("tag1", "value4")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
List<ShardRouting> startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED);
@@ -97,7 +94,7 @@ public class FilterRoutingTests extends ESAllocationTestCase {
logger.info("Building initial routing table");
- MetaData metaData = MetaData.builder()
+ MetaData initialMetaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 1)
@@ -106,11 +103,11 @@ public class FilterRoutingTests extends ESAllocationTestCase {
.build()))
.build();
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaData.index("test"))
+ RoutingTable initialRoutingTable = RoutingTable.builder()
+ .addAsNew(initialMetaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(initialMetaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -119,17 +116,14 @@ public class FilterRoutingTests extends ESAllocationTestCase {
.add(newNode("node3", singletonMap("tag1", "value3")))
.add(newNode("node4", singletonMap("tag1", "value4")))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
List<ShardRouting> startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED);
@@ -140,24 +134,21 @@ public class FilterRoutingTests extends ESAllocationTestCase {
logger.info("--> switch between value2 and value4, shards should be relocating");
- metaData = MetaData.builder()
- .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
- .put("index.number_of_shards", 2)
- .put("index.number_of_replicas", 1)
- .put("index.routing.allocation.include.tag1", "value1,value4")
- .put("index.routing.allocation.exclude.tag1", "value2,value3")
- .build()))
- .build();
- clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ IndexMetaData existingMetaData = clusterState.metaData().index("test");
+ MetaData updatedMetaData = MetaData.builder()
+ .put(IndexMetaData.builder(existingMetaData).settings(Settings.builder().put(existingMetaData.getSettings())
+ .put("index.routing.allocation.include.tag1", "value1,value4")
+ .put("index.routing.allocation.exclude.tag1", "value2,value3")
+ .build()))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(updatedMetaData).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(2));
logger.info("--> finish relocation");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED);
assertThat(startedShards.size(), equalTo(4));
@@ -175,25 +166,23 @@ public class FilterRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes and performing rerouting");
DiscoveryNode node1 = newNode("node1", singletonMap("tag1", "value1"));
DiscoveryNode node2 = newNode("node2", singletonMap("tag1", "value2"));
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node1).add(node2)).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node(node1.getId()).numberOfShardsWithState(INITIALIZING), equalTo(2));
assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(INITIALIZING), equalTo(2));
logger.info("--> start the shards (only primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> make sure all shards are started");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4));
@@ -205,29 +194,24 @@ public class FilterRoutingTests extends ESAllocationTestCase {
.build());
logger.info("--> move shards from node1 to node2");
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("--> check that concurrent recoveries only allows 1 shard to move");
assertThat(clusterState.getRoutingNodes().node(node1.getId()).numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(STARTED), equalTo(2));
logger.info("--> start the shards (only primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> move second shard from node1 to node2");
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(STARTED), equalTo(3));
logger.info("--> start the shards (only primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(STARTED), equalTo(4));
}
}
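The relocation test now edits the live index metadata instead of rebuilding it from scratch: it copies the existing settings, flips the include/exclude tags, and lets reroute start the moves. A sketch of that update pattern:

    IndexMetaData existingMetaData = clusterState.metaData().index("test");
    MetaData updatedMetaData = MetaData.builder()
        .put(IndexMetaData.builder(existingMetaData).settings(Settings.builder()
            .put(existingMetaData.getSettings())
            .put("index.routing.allocation.include.tag1", "value1,value4")
            .put("index.routing.allocation.exclude.tag1", "value2,value3")
            .build()))
        .build();
    clusterState = ClusterState.builder(clusterState).metaData(updatedMetaData).build();
    // reroute starts the relocations; starting the INITIALIZING copies completes them
    clusterState = strategy.reroute(clusterState, "reroute");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));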
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java
new file mode 100644
index 0000000000..6c1813de08
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java
@@ -0,0 +1,293 @@
+package org.elasticsearch.cluster.routing.allocation;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.action.shard.ShardStateAction.ShardEntry;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+
+public class InSyncAllocationIdTests extends ESAllocationTestCase {
+
+ private AllocationService allocation;
+ private ShardStateAction.ShardFailedClusterStateTaskExecutor failedClusterStateTaskExecutor;
+
+
+ @Before
+ public void setupAllocationService() {
+ allocation = createAllocationService();
+ failedClusterStateTaskExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocation, null, logger);
+ }
+
+ public void testInSyncAllocationIdsUpdated() {
+ logger.info("creating an index with 1 shard, 2 replicas");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
+            // add metadata for an index that has no entry in the routing table, to check that its allocation ids are not removed
+ .put(IndexMetaData.builder("test-old").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)
+ .putInSyncAllocationIds(0, new HashSet<>(Arrays.asList("x", "y"))))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+ .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding three nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(
+ newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
+
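+        // allocation ids are only tracked for started shards, so "test" has none yet while "test-old" keeps its preexisting ids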
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(0));
+ assertThat(clusterState.metaData().index("test-old").inSyncAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y"))));
+
+ logger.info("start primary shard");
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+
+ assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1));
+ assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).get(0).allocationId().getId(),
+ equalTo(clusterState.metaData().index("test").inSyncAllocationIds(0).iterator().next()));
+ assertThat(clusterState.metaData().index("test-old").inSyncAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y"))));
+
+ logger.info("start replica shards");
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(3));
+
+ logger.info("remove a node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove("node1"))
+ .build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+
+ // in-sync allocation ids should not be updated
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(3));
+
+ logger.info("remove all remaining nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove("node2").remove("node3"))
+ .build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+
+ // in-sync allocation ids should not be updated
+ assertThat(clusterState.getRoutingTable().shardsWithState(UNASSIGNED).size(), equalTo(3));
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(3));
+
+ // force empty primary
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .add(newNode("node1")))
+ .build();
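+        // the trailing "true" is acceptDataLoss: forcing an empty primary discards any data the shard copy previously had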
+ clusterState = allocation.reroute(clusterState,
+ new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), false, false)
+ .getClusterState();
+
+ // check that in-sync allocation ids are reset by forcing an empty primary
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(0));
+
+ logger.info("start primary shard");
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+
+ assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1));
+
+ logger.info("fail primary shard");
+ ShardRouting startedPrimary = clusterState.getRoutingNodes().shardsWithState(STARTED).get(0);
+ clusterState = allocation.applyFailedShard(clusterState, startedPrimary);
+
+ assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(0));
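+        // the failed primary's allocation id remains in the in-sync set; failing a shard does not mark its copy as stale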
+ assertEquals(Collections.singleton(startedPrimary.allocationId().getId()),
+ clusterState.metaData().index("test").inSyncAllocationIds(0));
+ }
+
+    /**
+     * Assume the following scenario: an indexing request is written to the primary, but fails to be replicated to an active replica.
+     * The primary instructs the master to fail the replica before acknowledging the write to the client. Meanwhile, the node of the
+     * replica was removed from the cluster (deassociateDeadNodes). This means that the ShardRouting of the replica was failed, but its
+     * allocation id is still part of the in-sync set. We have to make sure that the failShard request from the primary removes the
+     * allocation id from the in-sync set.
+     */
+ public void testDeadNodesBeforeReplicaFailed() throws Exception {
+ ClusterState clusterState = createOnePrimaryOneReplicaClusterState(allocation);
+
+ logger.info("remove replica node");
+ IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index("test").shard(0);
+ ShardRouting replicaShard = shardRoutingTable.replicaShards().get(0);
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(replicaShard.currentNodeId()))
+ .build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(2));
+
+ logger.info("fail replica (for which there is no shard routing in the CS anymore)");
+ assertNull(clusterState.getRoutingNodes().getByAllocationId(replicaShard.shardId(), replicaShard.allocationId().getId()));
+ long primaryTerm = clusterState.metaData().index("test").primaryTerm(0);
+ clusterState = failedClusterStateTaskExecutor.execute(clusterState, Arrays.asList(
+ new ShardEntry(shardRoutingTable.shardId(), replicaShard.allocationId().getId(), primaryTerm, "dummy", null))
+ ).resultingState;
+
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1));
+ }
+
+    /**
+     * Assume the following scenario: an indexing request is written to the primary, but fails to be replicated to an active replica.
+     * The primary instructs the master to fail the replica before acknowledging the write to the client. Meanwhile, the primary fails
+     * for an unrelated reason, so the master batches the requests to fail the primary and the replica. We have to make sure that only
+     * the allocation id of the primary is kept in the in-sync allocation set before we acknowledge the request to the client. Otherwise
+     * we would acknowledge a write that made it into the primary but not the replica, while the replica is still considered non-stale.
+     */
+ public void testPrimaryFailureBatchedWithReplicaFailure() throws Exception {
+ ClusterState clusterState = createOnePrimaryOneReplicaClusterState(allocation);
+
+ IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index("test").shard(0);
+ ShardRouting primaryShard = shardRoutingTable.primaryShard();
+ ShardRouting replicaShard = shardRoutingTable.replicaShards().get(0);
+
+ long primaryTerm = clusterState.metaData().index("test").primaryTerm(0);
+
+ List<ShardEntry> failureEntries = new ArrayList<>();
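+        // the primary failure is reported with primary term 0, the replica failure with the current primary term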
+ failureEntries.add(new ShardEntry(
+ shardRoutingTable.shardId(), primaryShard.allocationId().getId(), 0L, "dummy", null));
+ failureEntries.add(new ShardEntry(
+ shardRoutingTable.shardId(), replicaShard.allocationId().getId(), primaryTerm, "dummy", null));
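+        // shuffle so that the executor sees the batched failures in random order; the outcome must not depend on ordering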
+ Collections.shuffle(failureEntries, random());
+ logger.info("Failing {}", failureEntries);
+
+ clusterState = failedClusterStateTaskExecutor.execute(clusterState, failureEntries).resultingState;
+
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0),
+ equalTo(Collections.singleton(primaryShard.allocationId().getId())));
+
+ // resend shard failures to check if they are ignored
+ clusterState = failedClusterStateTaskExecutor.execute(clusterState, failureEntries).resultingState;
+
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0),
+ equalTo(Collections.singleton(primaryShard.allocationId().getId())));
+ }
+
+    /**
+     * Prevent the set of inSyncAllocationIds from growing unboundedly. This can happen, for example, if we don't write to a primary
+     * but repeatedly shut down nodes that have active replicas.
+     * We use number_of_replicas + 1 (= the possible number of active shard copies) to bound the inSyncAllocationIds set.
+     */
+ public void testInSyncIdsNotGrowingWithoutBounds() throws Exception {
+ ClusterState clusterState = createOnePrimaryOneReplicaClusterState(allocation);
+
+ Set<String> inSyncSet = clusterState.metaData().index("test").inSyncAllocationIds(0);
+ assertThat(inSyncSet.size(), equalTo(2));
+
+ IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index("test").shard(0);
+ ShardRouting primaryShard = shardRoutingTable.primaryShard();
+ ShardRouting replicaShard = shardRoutingTable.replicaShards().get(0);
+
+ logger.info("remove a node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(replicaShard.currentNodeId()))
+ .build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+
+ // in-sync allocation ids should not be updated
+ assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0));
+
+ // check that inSyncAllocationIds can not grow without bounds
+ for (int i = 0; i < 5; i++) {
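+            // each cycle allocates, starts and then fails the replica, assigning it a fresh allocation id every time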
+ logger.info("add back node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .add(newNode(replicaShard.currentNodeId())))
+ .build();
+ clusterState = allocation.reroute(clusterState, "reroute");
+
+ logger.info("start replica shards");
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+
+ logger.info("remove the node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(replicaShard.currentNodeId()))
+ .build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
+ }
+
+ // in-sync allocation set is bounded
+ Set<String> newInSyncSet = clusterState.metaData().index("test").inSyncAllocationIds(0);
+ assertThat(newInSyncSet.size(), equalTo(2));
+        // only the replica's allocation id changed; the primary's id is still in the set
+ assertFalse(Sets.haveEmptyIntersection(inSyncSet, newInSyncSet));
+ assertThat(newInSyncSet, hasItem(primaryShard.allocationId().getId()));
+ }
+
+ private ClusterState createOnePrimaryOneReplicaClusterState(AllocationService allocation) {
+ logger.info("creating an index with 1 shard, 1 replica");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+ .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(
+ newNode("node1")).add(newNode("node2"))).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
+
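+        // no shard has been started yet, so the in-sync set is still empty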
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(0));
+
+ logger.info("start primary shard");
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+
+ assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1));
+ assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).get(0).allocationId().getId(),
+ equalTo(clusterState.metaData().index("test").inSyncAllocationIds(0).iterator().next()));
+
+ logger.info("start replica shard");
+ clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+ assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(2));
+ return clusterState;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
index 9b93e556b3..986d08843a 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
@@ -19,30 +19,31 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
/**
*
*/
public class IndexBalanceTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(IndexBalanceTests.class);
+ private final Logger logger = Loggers.getLogger(IndexBalanceTests.class);
public void testBalanceAllNodesStarted() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -56,109 +57,95 @@ public class IndexBalanceTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
- RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
}
logger.info("Another round of rebalancing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
// backup shards are initializing as well, we make sure that they
// recover from primary *started* shards in the
// IndicesClusterStateService
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the more shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
-
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
@@ -186,142 +173,124 @@ public class IndexBalanceTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
- RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
}
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
- assertThat(prevRoutingTable == routingTable, equalTo(true));
logger.info("Start the primary shard");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
// backup shards are initializing as well, we make sure that they
// recover from primary *started* shards in the
// IndicesClusterStateService
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the backup shard");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
-
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the backup shard");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
@@ -347,91 +316,76 @@ public class IndexBalanceTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
- RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
}
logger.info("Another round of rebalancing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
- assertThat(prevRoutingTable == routingTable, equalTo(true));
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
// backup shards are initializing as well, we make sure that they
// recover from primary *started* shards in the
// IndicesClusterStateService
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the more shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
}
-
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
-
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
@@ -442,88 +396,72 @@ public class IndexBalanceTests extends ESAllocationTestCase {
logger.info("Add new index 3 shards 1 replica");
- prevRoutingTable = routingTable;
- metaData = MetaData.builder(metaData)
- .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
- .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
- .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
- ))
- .build();
- routingTable = RoutingTable.builder(routingTable)
- .addAsNew(metaData.index("test1"))
- .build();
- clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
-
-
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
-
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ MetaData updatedMetaData = MetaData.builder(clusterState.metaData())
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ))
+ .build();
+ RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable())
+ .addAsNew(updatedMetaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(updatedMetaData).routingTable(updatedRoutingTable).build();
+
+
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
}
logger.info("Another round of rebalancing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
- assertThat(prevRoutingTable == routingTable, equalTo(true));
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
// backup shards are initializing as well; we make sure that they
// recover from primary *started* shards in the
// IndicesClusterStateService
- assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the more shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
- assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
}
-
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
-
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
@@ -531,6 +469,5 @@ public class IndexBalanceTests extends ESAllocationTestCase {
assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
-
}
}
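
All of the hunks above perform the same migration: AllocationService#reroute and #applyStartedShards now return the resulting ClusterState directly instead of a RoutingAllocation.Result, and an allocation that changed nothing comes back equal to its input. A minimal sketch of the new idiom, assuming only the API visible in this diff (the helper class and method names are illustrative):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;

    final class RerouteHelper {
        private RerouteHelper() {}

        // Reroute and fold the result back in. An allocation that changed nothing
        // returns a state equal to its input, so callers can assert on equality.
        static ClusterState reroute(AllocationService strategy, ClusterState clusterState) {
            ClusterState newState = strategy.reroute(clusterState, "reroute");
            return newState.equals(clusterState) ? clusterState : newState;
        }
    }

Tests can then assert change or no-change with not(equalTo(...)) / equalTo(...) on the states themselves, as the rewritten assertions above do.
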
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java
index df4e155032..31e2330a60 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -34,8 +35,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.Collections;
import java.util.List;
@@ -43,6 +43,8 @@ import java.util.List;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
@@ -53,7 +55,7 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
super.setUp();
strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
}
private ClusterState createInitialClusterState() {
@@ -86,34 +88,37 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);
// now fail it N-1 times
for (int i = 0; i < retries-1; i++) {
- List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(
- new FailedRerouteAllocation.FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i,
+ List<FailedShard> failedShards = Collections.singletonList(
+ new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i,
new UnsupportedOperationException()));
- RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);
- assertTrue(result.changed());
- routingTable = result.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.applyFailedShards(clusterState, failedShards);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i+1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getMessage(), "boom" + i);
}
// now we go and check that the shard actually sticks to unassigned on the next failure
- List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(
- new FailedRerouteAllocation.FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom",
+ List<FailedShard> failedShards = Collections.singletonList(
+ new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom",
new UnsupportedOperationException()));
- RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);
- assertTrue(result.changed());
- routingTable = result.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.applyFailedShards(clusterState, failedShards);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getMessage(), "boom");
- result = strategy.reroute(clusterState, new AllocationCommands(), false, true); // manual reroute should retry once
- assertTrue(result.changed());
- routingTable = result.routingTable();
+ // manual reroute should retry once
+ newState = strategy.reroute(clusterState, new AllocationCommands(), false, true).getClusterState();
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
+
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertEquals(routingTable.index("idx").shards().size(), 1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);
@@ -122,12 +127,13 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
// now we go and check that the shard actually sticks to unassigned on the next failure, i.e. no retry
failedShards = Collections.singletonList(
- new FailedRerouteAllocation.FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom",
+ new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom",
new UnsupportedOperationException()));
- result = strategy.applyFailedShards(clusterState, failedShards);
- assertTrue(result.changed());
- routingTable = result.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ newState = strategy.applyFailedShards(clusterState, failedShards);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries+1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
@@ -141,13 +147,13 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);
// now fail it N-1 times
for (int i = 0; i < retries-1; i++) {
- List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(
- new FailedRerouteAllocation.FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i,
+ List<FailedShard> failedShards = Collections.singletonList(
+ new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i,
new UnsupportedOperationException()));
- RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);
- assertTrue(result.changed());
- routingTable = result.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.applyFailedShards(clusterState, failedShards);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
ShardRouting unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
assertEquals(unassignedPrimary.state(), INITIALIZING);
@@ -159,13 +165,13 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
}
// now we go and check that the shard actually sticks to unassigned on the next failure
{
- List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(
- new FailedRerouteAllocation.FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom",
+ List<FailedShard> failedShards = Collections.singletonList(
+ new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom",
new UnsupportedOperationException()));
- RoutingAllocation.Result result = strategy.applyFailedShards(clusterState, failedShards);
- assertTrue(result.changed());
- routingTable = result.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.applyFailedShards(clusterState, failedShards);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
ShardRouting unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
assertEquals(unassignedPrimary.unassignedInfo().getNumFailedAllocations(), retries);
@@ -183,10 +189,10 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
Settings.builder().put(clusterState.metaData().index("idx").getSettings()).put("index.allocation.max_retries",
retries+1).build()
).build(), true).build()).build();
- RoutingAllocation.Result result = strategy.reroute(clusterState, "settings changed", false);
- assertTrue(result.changed());
- routingTable = result.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "settings changed", false);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
// good we are initializing and we are maintaining failure information
assertEquals(routingTable.index("idx").shards().size(), 1);
ShardRouting unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
@@ -198,9 +204,9 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
routingTable.index("idx").shard(0).shards().get(0), null, new RoutingAllocation(null, null, clusterState, null, 0, false)));
// now we start the shard
- routingTable = strategy.applyStartedShards(clusterState, Collections.singletonList(
- routingTable.index("idx").shard(0).shards().get(0))).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList(
+ routingTable.index("idx").shard(0).shards().get(0)));
+ routingTable = clusterState.routingTable();
// all counters have been reset to 0, i.e. no unassigned info
assertEquals(routingTable.index("idx").shards().size(), 1);
@@ -208,17 +214,17 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), STARTED);
// now fail again and see if it has a new counter
- List<FailedRerouteAllocation.FailedShard> failedShards = Collections.singletonList(
- new FailedRerouteAllocation.FailedShard(routingTable.index("idx").shard(0).shards().get(0), "ZOOOMG",
+ List<FailedShard> failedShards = Collections.singletonList(
+ new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "ZOOOMG",
new UnsupportedOperationException()));
- result = strategy.applyFailedShards(clusterState, failedShards);
- assertTrue(result.changed());
- routingTable = result.routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyFailedShards(clusterState, failedShards);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+ routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
assertEquals(unassignedPrimary.unassignedInfo().getNumFailedAllocations(), 1);
- assertEquals(unassignedPrimary.state(), INITIALIZING);
+ assertEquals(unassignedPrimary.state(), UNASSIGNED);
assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "ZOOOMG");
// Counter reset, so MaxRetryAllocationDecider#canForceAllocatePrimary should return a YES decision
assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary(
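
The failure path follows the same shape: the nested FailedRerouteAllocation.FailedShard becomes a top-level FailedShard, and applyFailedShards returns the updated state. A sketch of that pattern, assuming FailedShard lives in org.elasticsearch.cluster.routing.allocation next to AllocationService (helper name illustrative):

    import java.util.Collections;

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;
    import org.elasticsearch.cluster.routing.allocation.FailedShard;

    final class FailShardHelper {
        private FailShardHelper() {}

        // Fail a single shard copy with a message and a cause, mirroring the test
        // pattern above, and return whatever state the allocation service produced.
        static ClusterState failShard(AllocationService strategy, ClusterState clusterState, ShardRouting shard) {
            return strategy.applyFailedShards(clusterState, Collections.singletonList(
                    new FailedShard(shard, "boom", new UnsupportedOperationException())));
        }
    }
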
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
index 9050a91222..b472cc5e0d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
@@ -19,17 +19,20 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -37,24 +40,24 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
-import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.VersionUtils;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import static java.util.Collections.emptyMap;
@@ -64,7 +67,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.elasticsearch.test.VersionUtils.randomVersion;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@@ -72,7 +74,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class);
+ private final Logger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class);
public void testDoNotAllocateFromPrimary() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -87,104 +89,87 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
-
- assertThat(routingTable.index("test").shards().size(), equalTo(5));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2));
}
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3", VersionUtils.getPreviousVersion())))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.reroute(clusterState, "reroute");
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
}
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2));
}
}
@@ -256,15 +241,15 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(5));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("old0", VersionUtils.getPreviousVersion()))
@@ -292,15 +277,14 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
.add(newNode("new0"))).build();
clusterState = stabilize(clusterState, service);
- routingTable = clusterState.routingTable();
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), notNullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), notNullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), notNullValue());
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), notNullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), notNullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).currentNodeId(), notNullValue());
}
}
@@ -313,21 +297,25 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
MASTER_DATA_ROLES, VersionUtils.getPreviousVersion());
final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", LocalTransportAddress.buildUnique(), emptyMap(),
MASTER_DATA_ROLES, VersionUtils.getPreviousVersion());
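+ // create explicit allocation ids up front so the started shard copies below can be registered as in-sync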
+ AllocationId allocationId1P = AllocationId.newInitializing();
+ AllocationId allocationId1R = AllocationId.newInitializing();
+ AllocationId allocationId2P = AllocationId.newInitializing();
+ AllocationId allocationId2R = AllocationId.newInitializing();
MetaData metaData = MetaData.builder()
- .put(IndexMetaData.builder(shard1.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1).numberOfReplicas(1))
- .put(IndexMetaData.builder(shard2.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder(shard1.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1).numberOfReplicas(1).putInSyncAllocationIds(0, Sets.newHashSet(allocationId1P.getId(), allocationId1R.getId())))
+ .put(IndexMetaData.builder(shard2.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1).numberOfReplicas(1).putInSyncAllocationIds(0, Sets.newHashSet(allocationId2P.getId(), allocationId2R.getId())))
.build();
RoutingTable routingTable = RoutingTable.builder()
.add(IndexRoutingTable.builder(shard1.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shard1)
- .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.getId(), true, ShardRoutingState.STARTED))
- .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.getId(), false, ShardRoutingState.STARTED))
+ .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.getId(), null, true, ShardRoutingState.STARTED, allocationId1P))
+ .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.getId(), null, false, ShardRoutingState.STARTED, allocationId1R))
.build())
)
.add(IndexRoutingTable.builder(shard2.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shard2)
- .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.getId(), true, ShardRoutingState.STARTED))
- .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.getId(), false, ShardRoutingState.STARTED))
+ .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.getId(), null, true, ShardRoutingState.STARTED, allocationId2P))
+ .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.getId(), null, false, ShardRoutingState.STARTED, allocationId2R))
.build())
)
.build();
@@ -335,15 +323,14 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build();
- AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new NodeVersionAllocationDecider(Settings.EMPTY)});
+ AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Collections.singleton(new NodeVersionAllocationDecider(Settings.EMPTY)));
AllocationService strategy = new MockAllocationService(Settings.EMPTY,
allocationDeciders,
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
- RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true, false);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ state = strategy.reroute(state, new AllocationCommands(), true, false).getClusterState();
// the two indices must stay as is; the replicas cannot move to oldNode2 because versions don't match
- state = ClusterState.builder(state).routingResult(result).build();
- assertThat(result.routingTable().index(shard2.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0));
- assertThat(result.routingTable().index(shard1.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0));
+ assertThat(state.routingTable().index(shard2.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0));
+ assertThat(state.routingTable().index(shard1.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0));
}
public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() {
@@ -355,55 +342,50 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
MASTER_DATA_ROLES, VersionUtils.getPreviousVersion());
int numberOfShards = randomIntBetween(1, 3);
- MetaData metaData = MetaData.builder()
- .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas
- (randomIntBetween(0, 3)))
- .build();
+ final IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test").settings(settings(Version.CURRENT))
+ .numberOfShards(numberOfShards).numberOfReplicas(randomIntBetween(0, 3));
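+ // give every shard a placeholder in-sync allocation id; the test gateway allocator relies on these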
+ for (int i = 0; i < numberOfShards; i++) {
+ indexMetaData.putInSyncAllocationIds(i, Collections.singleton("_test_"));
+ }
+ MetaData metaData = MetaData.builder().put(indexMetaData).build();
ClusterState state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"),
- new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
+ new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
Version.CURRENT, "test")).build())
.nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build();
- AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{
+ AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY),
- new NodeVersionAllocationDecider(Settings.EMPTY)});
+ new NodeVersionAllocationDecider(Settings.EMPTY)));
AllocationService strategy = new MockAllocationService(Settings.EMPTY,
allocationDeciders,
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
- RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true, false);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ state = strategy.reroute(state, new AllocationCommands(), true, false).getClusterState();
// Make sure that primary shards are only allocated on the new node
for (int i = 0; i < numberOfShards; i++) {
- assertEquals("newNode", result.routingTable().index("test").getShards().get(i).primaryShard().currentNodeId());
+ assertEquals("newNode", state.routingTable().index("test").getShards().get(i).primaryShard().currentNodeId());
}
}
private ClusterState stabilize(ClusterState clusterState, AllocationService service) {
logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint());
- RoutingTable routingTable = service.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = service.deassociateDeadNodes(clusterState, true, "reroute");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertRecoveryNodeVersions(routingNodes);
logger.info("complete rebalancing");
- RoutingTable prev = routingTable;
- boolean stable = false;
- for (int i = 0; i < 1000; i++) { // at most 200 iters - this should be enough for all tests
+ boolean changed;
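+ // repeat until applying the started shards no longer changes the cluster state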
+ do {
logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint());
- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ changed = newState.equals(clusterState) == false;
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- if (stable = (routingTable == prev)) {
- break;
- }
assertRecoveryNodeVersions(routingNodes);
- prev = routingTable;
- }
- logger.info("stabilized success [{}]", stable);
- assertThat(stable, is(true));
+ } while (changed);
return clusterState;
}
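
The rewritten stabilize loop generalizes to any allocation test: keep starting INITIALIZING shards until a pass leaves the state unchanged. A standalone sketch of that convergence loop under the same API assumptions (class and method names illustrative):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;

    import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;

    final class StabilizeHelper {
        private StabilizeHelper() {}

        // Start INITIALIZING shards until a pass no longer changes the state;
        // equality with the input is the termination condition.
        static ClusterState startAllInitializing(AllocationService service, ClusterState clusterState) {
            while (true) {
                ClusterState newState = service.applyStartedShards(clusterState,
                        clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
                if (newState.equals(clusterState)) {
                    return clusterState; // converged
                }
                clusterState = newState;
            }
        }
    }
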
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
index 9c24993a72..7e528e601d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
@@ -20,13 +20,13 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
@@ -38,6 +38,7 @@ import static org.hamcrest.Matchers.equalTo;
/**
*/
public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocationTestCase {
+
public void testPreferLocalPrimaryAllocationOverFiltered() {
int concurrentRecoveries = randomIntBetween(1, 10);
int primaryRecoveries = randomIntBetween(1, 10);
@@ -57,43 +58,40 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("adding two nodes and performing rerouting till all are allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
- .add(newNode("node1", singletonMap("tag1", "value1")))
- .add(newNode("node2", singletonMap("tag1", "value2")))).build();
+ .add(newNode("node1")).add(newNode("node2"))).build();
+
+ clusterState = strategy.reroute(clusterState, "reroute");
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
while (!clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()) {
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
}
logger.info("remove one of the nodes and apply filter to move everything from another node");
metaData = MetaData.builder()
- .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.builder(clusterState.metaData().index("test1")).settings(settings(Version.CURRENT)
.put("index.number_of_shards", numberOfShards)
.put("index.number_of_replicas", 0)
- .put("index.routing.allocation.exclude.tag1", "value2")
+ .put("index.routing.allocation.exclude._name", "node2")
.build()))
- .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.builder(clusterState.metaData().index("test2")).settings(settings(Version.CURRENT)
.put("index.number_of_shards", numberOfShards)
.put("index.number_of_replicas", 0)
- .put("index.routing.allocation.exclude.tag1", "value2")
+ .put("index.routing.allocation.exclude._name", "node2")
.build()))
.build();
clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
- routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards);
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(numberOfShards));
@@ -103,8 +101,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation
logger.info("start node back up");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node1", singletonMap("tag1", "value1")))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
while (clusterState.getRoutingNodes().shardsWithState(STARTED).size() < totalNumberOfShards) {
int localInitializations = 0;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
index b9ac52c69a..1c209157b1 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
@@ -19,17 +19,17 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.hamcrest.Matchers.equalTo;
@@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.equalTo;
/**
*/
public class PreferPrimaryAllocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class);
+ private final Logger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class);
public void testPreferPrimaryAllocationOverReplicas() {
logger.info("create an allocation with 1 initial recoveries");
@@ -54,30 +54,27 @@ public class PreferPrimaryAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("adding two nodes and performing rerouting till all are allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
while (!clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()) {
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
}
logger.info("increasing the number of replicas to 1, and perform a reroute (to get the replicas allocation going)");
- routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(1).build();
+ RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(1).build();
metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metaData(metaData).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("2 replicas should be initializing now for the existing indices (we throttle to 1)");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
@@ -87,15 +84,14 @@ public class PreferPrimaryAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("new_index").settings(settings(Version.CURRENT)).numberOfShards(4).numberOfReplicas(0))
.build();
- routingTable = RoutingTable.builder(clusterState.routingTable())
+ updatedRoutingTable = RoutingTable.builder(clusterState.routingTable())
.addAsNew(metaData.index("new_index"))
.build();
- clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(updatedRoutingTable).build();
logger.info("reroute, verify that primaries for the new index primary shards are allocated");
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.routingTable().index("new_index").shardsWithState(INITIALIZING).size(), equalTo(2));
}
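
Raising the replica count is itself a small three-step pattern: rebuild the routing table and the metadata with the new count, rebuild the cluster state from both, then reroute so the new copies start allocating. A sketch under the same API assumptions (helper name illustrative):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.MetaData;
    import org.elasticsearch.cluster.routing.RoutingTable;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;

    final class ReplicaHelper {
        private ReplicaHelper() {}

        // Set the replica count on all indices to `replicas` and reroute; the
        // returned state has the new replica copies queued for allocation.
        static ClusterState setReplicas(AllocationService strategy, ClusterState clusterState, int replicas) {
            RoutingTable routingTable = RoutingTable.builder(clusterState.routingTable())
                    .updateNumberOfReplicas(replicas).build();
            MetaData metaData = MetaData.builder(clusterState.metaData())
                    .updateNumberOfReplicas(replicas).build();
            clusterState = ClusterState.builder(clusterState)
                    .routingTable(routingTable).metaData(metaData).build();
            return strategy.reroute(clusterState, "reroute");
        }
    }
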
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
index cca0a5345d..d789e6c4ec 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
@@ -19,20 +19,21 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
@@ -40,7 +41,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class);
public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -59,28 +60,23 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- result = strategy.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the primary shard (on node1)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- result = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
logger.info("Start the backup shard (on node2)");
routingNodes = clusterState.getRoutingNodes();
- result = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING));
logger.info("Adding third node and reroute and kill first node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3")).remove("node1")).build();
RoutingTable prevRoutingTable = clusterState.routingTable();
- result = strategy.deassociateDeadNodes(clusterState, true, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
routingNodes = clusterState.getRoutingNodes();
routingTable = clusterState.routingTable();
@@ -112,13 +108,11 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+ clusterState = allocation.reroute(clusterState, "reroute");
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- rerouteResult = allocation.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+ clusterState = allocation.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(2));
@@ -130,16 +124,14 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
logger.info("--> fail node with primary");
String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
- .add(newNode(nodeIdRemaining))
- ).build();
- rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
- clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode(nodeIdRemaining))).build();
+ clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingNodes.node(nodeIdRemaining).shardsWithState(INITIALIZING).get(0).primary(), equalTo(true));
+ assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingNodes.shardsWithState(UNASSIGNED).size(), equalTo(3)); // 2 replicas and one primary
+ assertThat(routingNodes.node(nodeIdRemaining).shardsWithState(STARTED).get(0).primary(), equalTo(true));
assertThat(clusterState.metaData().index("test").primaryTerm(0), equalTo(2L));
}
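
The reworked assertions at the end of this file capture a behavior change rather than a pure cleanup: when the node holding a started primary is removed, the started replica on the surviving node is promoted in place (it stays STARTED and becomes primary, bumping the primary term to 2), while the copies that lived on the dead node go UNASSIGNED instead of INITIALIZING. A condensed sketch of the new checks, reusing the matchers imported above:

    clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    // the surviving copy is promoted, not re-initialized
    assertThat(routingNodes.node(nodeIdRemaining).shardsWithState(STARTED).get(0).primary(), equalTo(true));
    assertThat(routingNodes.shardsWithState(UNASSIGNED).size(), equalTo(3)); // two replicas and one primary
    assertThat(clusterState.metaData().index("test").primaryTerm(0), equalTo(2L));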
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
index 609d832456..f2673805fa 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
@@ -19,17 +19,17 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo;
*
*/
public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class);
+ private final Logger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class);
public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -54,38 +54,34 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the primary shard (on node1)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
logger.info("start another node, replica will start recovering form primary");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(5));
logger.info("start another node, make sure the primary is not relocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(5));
}
}
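
A second effect of returning ClusterState directly: assertions in this file now read shard states from clusterState.routingTable() rather than a local routingTable variable, which could silently go stale once the next reroute produced a new table. The resulting idiom, as used above:

    clusterState = strategy.reroute(clusterState, "reroute");
    // query the table owned by the state the reroute just produced
    assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
    assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(5));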
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
index 061aa90188..6722e04803 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -37,11 +38,11 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import org.hamcrest.Matchers;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Random;
@@ -58,7 +59,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(random());
AllocationService strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY),
- randomAllocationDecider))), NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+ randomAllocationDecider))), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
int indices = scaledRandomIntBetween(1, 20);
Builder metaBuilder = MetaData.builder();
int maxNumReplicas = 1;
@@ -77,12 +78,13 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
routingTableBuilder.addAsNew(metaData.index("INDEX_" + i));
}
- RoutingTable routingTable = routingTableBuilder.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ RoutingTable initialRoutingTable = routingTableBuilder.build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
int numIters = scaledRandomIntBetween(5, 15);
int nodeIdCounter = 0;
int atMostNodes = scaledRandomIntBetween(Math.max(1, maxNumReplicas), 15);
final boolean frequentNodes = randomBoolean();
+ AllocationService.CommandsResult routingResult;
for (int i = 0; i < numIters; i++) {
logger.info("Start iteration [{}]", i);
ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
@@ -100,23 +102,36 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
boolean nodesRemoved = false;
if (nodeIdCounter > 1 && rarely()) {
int nodeId = scaledRandomIntBetween(0, nodeIdCounter - 2);
- logger.info("removing node [{}]", nodeId);
- newNodesBuilder.remove("NODE_" + nodeId);
- nodesRemoved = true;
+ final String node = "NODE_" + nodeId;
+ boolean safeToRemove = true;
+ RoutingNode routingNode = clusterState.getRoutingNodes().node(node);
+ for (ShardRouting shard: routingNode != null ? routingNode : Collections.<ShardRouting>emptyList()) {
+ if (shard.active() && shard.primary()) {
+ // make sure there is an active replica to prevent the cluster from going red
+ if (clusterState.routingTable().shardRoutingTable(shard.shardId()).activeShards().size() <= 1) {
+ safeToRemove = false;
+ break;
+ }
+ }
+ }
+ if (safeToRemove) {
+ logger.info("removing node [{}]", nodeId);
+ newNodesBuilder.remove(node);
+ nodesRemoved = true;
+ } else {
+ logger.debug("not removing node [{}] as it holds a primary with no replacement", nodeId);
+ }
}
stateBuilder.nodes(newNodesBuilder.build());
clusterState = stateBuilder.build();
if (nodesRemoved) {
- routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
+ clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
} else {
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
+ clusterState = strategy.reroute(clusterState, "reroute");
}
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) {
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
}
}
logger.info("Fill up nodes such that every shard can be allocated");
@@ -137,17 +152,14 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
int iterations = 0;
do {
iterations++;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) {
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
}
} while ((clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 ||
clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0) && iterations < 200);
- logger.info("Done Balancing after [{}] iterations", iterations);
+ logger.info("Done Balancing after [{}] iterations. State:\n{}", iterations, clusterState.prettyPrint());
// we stop after 200 iterations; if it didn't stabilize by then, something is likely to be wrong
assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200));
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0));
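
The guard added to this randomized test keeps it from removing a node that holds the only active copy of a primary shard, which would turn the test cluster red for reasons unrelated to the decider under test. A condensed sketch of the check as introduced above (RoutingNode is iterable over its ShardRouting entries; the null guard covers nodes that never received a shard):

    boolean safeToRemove = true;
    RoutingNode routingNode = clusterState.getRoutingNodes().node(node);
    for (ShardRouting shard : routingNode != null ? routingNode : Collections.<ShardRouting>emptyList()) {
        if (shard.active() && shard.primary()
                && clusterState.routingTable().shardRoutingTable(shard.shardId()).activeShards().size() <= 1) {
            safeToRemove = false; // this node holds the last active copy of a shard
            break;
        }
    }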
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
index b1d83b767b..ff2020d684 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
@@ -19,10 +19,12 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -31,10 +33,8 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class RebalanceAfterActiveTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(RebalanceAfterActiveTests.class);
+ private final Logger logger = Loggers.getLogger(RebalanceAfterActiveTests.class);
public void testRebalanceOnlyAfterAllShardsAreActive() {
final long[] sizes = new long[5];
@@ -83,77 +83,69 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(5));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ clusterState = strategy.reroute(clusterState, "reroute");
+
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
- assertEquals(routingTable.index("test").shard(i).replicaShards().get(0).getExpectedShardSize(), sizes[i]);
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertEquals(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).getExpectedShardSize(), sizes[i]);
}
logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10")))
.build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
- assertEquals(routingTable.index("test").shard(i).replicaShards().get(0).getExpectedShardSize(), sizes[i]);
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertEquals(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).getExpectedShardSize(), sizes[i]);
}
logger.info("start the replica shards, rebalancing should start");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
// we only allow one relocation at a time
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(5));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(5));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
int num = 0;
- for (ShardRouting routing : routingTable.index("test").shard(i).shards()) {
+ for (ShardRouting routing : clusterState.routingTable().index("test").shard(i).shards()) {
if (routing.state() == RELOCATING || routing.state() == INITIALIZING) {
assertEquals(routing.getExpectedShardSize(), sizes[i]);
num++;
@@ -164,16 +156,14 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase {
logger.info("complete relocation, other half of relocation should happen");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
// we now only relocate 3, since 2 remain where they are!
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- for (ShardRouting routing : routingTable.index("test").shard(i).shards()) {
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(3));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ for (ShardRouting routing : clusterState.routingTable().index("test").shard(i).shards()) {
if (routing.state() == RELOCATING || routing.state() == INITIALIZING) {
assertEquals(routing.getExpectedShardSize(), sizes[i]);
}
@@ -183,12 +173,10 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase {
logger.info("complete relocation, that's it!");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10));
// make sure we have an even relocation
for (RoutingNode routingNode : routingNodes) {
assertThat(routingNode.size(), equalTo(1));
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
index 440d651f77..cf9db4ec54 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -26,10 +27,9 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class);
+ private final Logger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class);
public void testBackupIsAllocatedAfterPrimary() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
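
This file shows the logging migration at its smallest: the ESLogger wrapper is gone, and tests declare a plain Log4j 2 Logger obtained through the unchanged Loggers helper. The whole pattern is two imports and one field:

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;

    private final Logger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class);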
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
index cd31f75b50..6c837ed2b2 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -29,20 +31,19 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
/**
*
*/
public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(IndexBalanceTests.class);
+ private final Logger logger = Loggers.getLogger(IndexBalanceTests.class);
public void testBalanceAllNodesStarted() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -56,15 +57,14 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
- RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
- RoutingNodes routingNodes = clusterState.getRoutingNodes();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
- routingNodes = clusterState.getRoutingNodes();
+ RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
// all shards are unassigned. so no inactive shards or primaries.
@@ -72,9 +72,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -84,25 +82,18 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Another round of rebalancing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the more shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -110,9 +101,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- routingNodes = clusterState.getRoutingNodes();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
}
@@ -128,72 +117,59 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
- RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the primary shard");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the backup shard");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the backup shard");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
@@ -220,9 +196,9 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
- RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
@@ -234,9 +210,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -246,20 +220,15 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Another round of rebalancing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), equalTo(1));
assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -271,15 +240,12 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the more shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -297,17 +263,16 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Add new index 3 shards 1 replica");
- prevRoutingTable = routingTable;
- metaData = MetaData.builder(metaData)
+ metaData = MetaData.builder(clusterState.metaData())
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
))
.build();
- routingTable = RoutingTable.builder(routingTable)
- .addAsNew(metaData.index("test1"))
- .build();
- clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable())
+ .addAsNew(metaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(updatedRoutingTable).build();
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -315,17 +280,14 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
- assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Reroute, assign");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -333,13 +295,10 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
- assertThat(prevRoutingTable == routingTable, equalTo(true));
logger.info("Reroute, start the primaries");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -349,9 +308,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Reroute, start the replicas");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -369,10 +326,9 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
logger.info("kill one node");
- IndexShardRoutingTable indexShardRoutingTable = routingTable.index("test").shard(0);
+ IndexShardRoutingTable indexShardRoutingTable = clusterState.routingTable().index("test").shard(0);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
- routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -383,9 +339,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Start Recovering shards round 1");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@@ -395,9 +349,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Start Recovering shards round 2");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
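
This file also introduces the idiom that replaces the old prevRoutingTable == routingTable identity checks: since reroute and applyStartedShards return a ClusterState, a step that should be a no-op can assert the returned state equals the input, and a step that must change something can assert the opposite. A sketch of both directions, using the Hamcrest not matcher imported above:

    // a reroute with nothing to do yields an equal state
    ClusterState newState = strategy.reroute(clusterState, "reroute");
    assertThat(newState, equalTo(clusterState));

    // starting initializing shards must produce a different state
    newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;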
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
index 331adcd146..66fe40793d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
@@ -19,9 +19,11 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -30,11 +32,9 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.test.ESAllocationTestCase;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
@@ -44,11 +44,11 @@ import static org.hamcrest.Matchers.equalTo;
/**
*/
public class SameShardRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(SameShardRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(SameShardRoutingTests.class);
public void testSameHost() {
- AllocationService strategy = createAllocationService(Settings.builder()
- .put(SameShardAllocationDecider.SAME_HOST_SETTING, true).build());
+ AllocationService strategy = createAllocationService(
+ Settings.builder().put(SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), true).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
@@ -67,15 +67,12 @@ public class SameShardRoutingTests extends ESAllocationTestCase {
MASTER_DATA_ROLES, Version.CURRENT))
.add(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(),
MASTER_DATA_ROLES, Version.CURRENT))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
logger.info("--> start all primary shards, no replica will be started since its on the same host");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(0));
@@ -84,8 +81,7 @@ public class SameShardRoutingTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(),
MASTER_DATA_ROLES, Version.CURRENT))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
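
The hunks above show the central API change running through this patch: AllocationService.reroute and applyStartedShards now return the updated ClusterState directly, instead of a result object whose routing table each caller had to stitch back into the state. A minimal before/after sketch, assuming the signatures used in these tests:

    // Old idiom: extract the routing table from the result, then rebuild the state.
    RoutingTable routingTable = strategy.reroute(clusterState, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    // New idiom: the service hands back a fully built ClusterState.
    clusterState = strategy.reroute(clusterState, "reroute");

The new form leaves no intermediate result for a caller to forget to apply, which is why every test below collapses two lines into one.
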
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
index d50e44c48d..534e2af5a8 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -27,10 +28,9 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -38,7 +38,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.equalTo;
public class ShardVersioningTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class);
+ private final Logger logger = Loggers.getLogger(ShardVersioningTests.class);
public void testSimple() {
AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
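
The same files also migrate from Elasticsearch's ESLogger facade to the log4j 2 Logger, keeping the Loggers factory; only the field type and the import change. A sketch of the swap, assuming the imports shown in the hunks:

    // Before: the Elasticsearch logging facade.
    import org.elasticsearch.common.logging.ESLogger;
    private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class);

    // After: log4j 2's Logger, still obtained via the Loggers helper.
    import org.apache.logging.log4j.Logger;
    private final Logger logger = Loggers.getLogger(ShardVersioningTests.class);
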
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
index a90b88fa9d..85948f3c52 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -29,10 +31,8 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.equalTo;
/**
*/
public class ShardsLimitAllocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ShardsLimitAllocationTests.class);
+ private final Logger logger = Loggers.getLogger(ShardsLimitAllocationTests.class);
public void testIndexLevelShardsLimitAllocate() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -64,16 +64,14 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0));
@@ -83,8 +81,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
logger.info("Do another reroute, make sure its still not allocated");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
}
public void testClusterLevelShardsLimitAllocate() {
@@ -108,16 +105,14 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(1));
@@ -130,15 +125,13 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
.build());
logger.info("Do another reroute, make sure shards are now allocated");
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
@@ -164,44 +157,40 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding one node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), STARTED), equalTo(5));
logger.info("add another index with 5 shards");
- metaData = MetaData.builder(metaData)
+ metaData = MetaData.builder(clusterState.metaData())
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
))
.build();
- routingTable = RoutingTable.builder(routingTable)
- .addAsNew(metaData.index("test1"))
- .build();
+ RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable())
+ .addAsNew(metaData.index("test1"))
+ .build();
- clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(updatedRoutingTable).build();
logger.info("Add another one node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), STARTED), equalTo(10));
@@ -213,8 +202,8 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
}
logger.info("update {} for test, see that things move", ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey());
- metaData = MetaData.builder(metaData)
- .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
+ metaData = MetaData.builder(clusterState.metaData())
+ .put(IndexMetaData.builder(clusterState.metaData().index("test")).settings(settings(Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 3)
@@ -225,8 +214,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
logger.info("reroute after setting");
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(3));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(RELOCATING), equalTo(2));
@@ -235,8 +223,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
// the first move will destroy the balance and the balancer will move 2 shards from node2 to node1 right after
// moving the shards to node2, since we consider INITIALIZING shards during rebalance
routingNodes = clusterState.getRoutingNodes();
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
// now we are done compared to EvenShardCountAllocator since the Balancer is not solely based on the average
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(5));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(5));
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
index 1b4d35d44d..dd89d6b6a5 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
@@ -19,8 +19,10 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -30,10 +32,8 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.util.ArrayList;
import java.util.HashSet;
@@ -56,7 +56,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class);
public void testSingleIndexStartedShard() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -67,96 +67,99 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaData.index("test"))
- .build();
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
logger.info("Rerouting again, nothing should change");
- prevRoutingTable = routingTable;
clusterState = ClusterState.builder(clusterState).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(routingTable == prevRoutingTable, equalTo(true));
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
+ clusterState = newState;
logger.info("Marking the shard as started");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
- assertThat(routingTable != prevRoutingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
logger.info("Starting another node and making sure nothing changed");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
+ clusterState = newState;
- assertThat(routingTable == prevRoutingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
- logger.info("Killing node1 where the shard is, checking the shard is relocated");
+ logger.info("Killing node1 where the shard is, checking the shard is unassigned");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Bring node1 back, and see it's assinged");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node1"))).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
- assertThat(routingTable != prevRoutingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- assertThat(routingTable == prevRoutingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
- logger.info("Start the shard on node 2");
+ logger.info("Start the shard on node 1");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(routingTable != prevRoutingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
}
public void testSingleIndexShardFailed() {
@@ -168,44 +171,41 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaData.index("test"))
- .build();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder()
+ .addAsNew(metaData.index("test"));
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
logger.info("Adding one node and rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
logger.info("Marking the shard as failed");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyFailedShard(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ newState = strategy.applyFailedShard(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
}
public void testMultiIndexEvenDistribution() {
@@ -228,16 +228,15 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
for (int i = 0; i < numberOfIndices; i++) {
routingTableBuilder.addAsNew(metaData.index("test" + i));
}
- RoutingTable routingTable = routingTableBuilder.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build();
- assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+ assertThat(clusterState.routingTable().indicesRouting().size(), equalTo(numberOfIndices));
for (int i = 0; i < numberOfIndices; i++) {
- assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).currentNodeId(), nullValue());
}
logger.info("Adding " + (numberOfIndices / 2) + " nodes");
@@ -246,21 +245,20 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
for (int i = 0; i < (numberOfIndices / 2); i++) {
nodesBuilder.add(newNode("node" + i));
}
- RoutingTable prevRoutingTable = routingTable;
clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
- assertThat(prevRoutingTable != routingTable, equalTo(true));
for (int i = 0; i < numberOfIndices; i++) {
- assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+ assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
// make sure we still have 2 shards initializing per node on the first 25 nodes
- String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ String nodeId = clusterState.routingTable().index("test" + i).shard(0).shards().get(0).currentNodeId();
int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
assertThat(nodeIndex, lessThan(25));
}
@@ -284,35 +282,31 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) {
nodesBuilder.add(newNode("node" + i));
}
- prevRoutingTable = routingTable;
clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(false));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Marking the shard as started");
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
- assertThat(prevRoutingTable != routingTable, equalTo(true));
+ clusterState = newState;
int numberOfRelocatingShards = 0;
int numberOfStartedShards = 0;
for (int i = 0; i < numberOfIndices; i++) {
- assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING)));
- if (routingTable.index("test" + i).shard(0).shards().get(0).state() == STARTED) {
+ assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING)));
+ if (clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state() == STARTED) {
numberOfStartedShards++;
- } else if (routingTable.index("test" + i).shard(0).shards().get(0).state() == RELOCATING) {
+ } else if (clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state() == RELOCATING) {
numberOfRelocatingShards++;
}
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
// make sure we still have 2 shards either relocating or started on the first 25 nodes
- String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ String nodeId = clusterState.routingTable().index("test" + i).shard(0).shards().get(0).currentNodeId();
int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
assertThat(nodeIndex, lessThan(25));
}
@@ -340,26 +334,24 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
for (int i = 0; i < numberOfIndices; i++) {
routingTableBuilder.addAsNew(metaData.index("test" + i));
}
- RoutingTable routingTable = routingTableBuilder.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build();
- assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+ assertThat(clusterState.routingTable().indicesRouting().size(), equalTo(numberOfIndices));
logger.info("Starting 3 nodes and rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")))
.build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
- assertThat(prevRoutingTable != routingTable, equalTo(true));
for (int i = 0; i < numberOfIndices; i++) {
- assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
}
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(numberOfIndices));
@@ -371,41 +363,36 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4")).add(newNode("node5")))
.build();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
-
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
- assertThat(prevRoutingTable != routingTable, equalTo(true));
for (int i = 0; i < numberOfIndices; i++) {
- assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
}
routingNodes = clusterState.getRoutingNodes();
assertThat("4 source shard routing are relocating", numberOfShardsOfType(routingNodes, RELOCATING), equalTo(4));
assertThat("4 target shard routing are initializing", numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(4));
logger.info("Now, mark the relocated as started");
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
// routingTable = strategy.reroute(new RoutingStrategyInfo(metaData, routingTable), nodes);
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
- assertThat(prevRoutingTable != routingTable, equalTo(true));
for (int i = 0; i < numberOfIndices; i++) {
- assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
- assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
}
routingNodes = clusterState.getRoutingNodes();
assertThat(numberOfShardsOfType(routingNodes, STARTED), equalTo(numberOfIndices));
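
With reroute returning a ClusterState, the reference checks on routing tables (prevRoutingTable == routingTable) give way to equality assertions on whole states: a no-op call is expected to hand back a state equal to its input, while a state-changing call must not. A sketch of the idiom, assuming (as these tests rely on) that an unchanged reroute returns the very same ClusterState instance, so equalTo holds by reference:

    // No-op reroute: nothing to allocate, so the state is unchanged.
    ClusterState newState = strategy.reroute(clusterState, "reroute");
    assertThat(newState, equalTo(clusterState));

    // State-changing call: assert the difference, then adopt the new state.
    newState = strategy.applyStartedShards(clusterState,
        clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
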
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
index 0eb317d198..0990850aca 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
@@ -19,29 +19,30 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
/**
*
*/
public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class);
public void testSingleIndexFirstStartPrimaryThenBackups() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -52,118 +53,109 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the primary shard (on node1)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
// backup shards are initializing as well; we make sure that they recover from primary *started* shards in the IndicesClusterStateService
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the backup shard");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
logger.info("Kill node1, backup shard should become primary");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ newState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
// the replica is now unassigned; it will be re-allocated once another node joins the cluster
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
logger.info("Start another node, backup shard should start initializing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
// backup shards are initializing as well; we make sure that they recover from primary *started* shards in the IndicesClusterStateService
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
}
}
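
The hunks above show the central refactoring in this merge: AllocationService methods such as reroute, applyStartedShards and deassociateDeadNodes now return a complete ClusterState rather than a RoutingAllocation.Result, so the tests stop rebuilding the state from a routing table and stop comparing routing-table references to detect no-ops. A minimal sketch of the new calling pattern, assuming only the method signatures visible in this diff (the helper class is invented for illustration):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;

    final class RerouteSketch {
        /**
         * Illustrative helper only: with the post-refactoring API an unchanged
         * allocation yields a state equal to the input, so a no-op is detected
         * by comparing the returned state against the state that was passed in.
         */
        static ClusterState reroute(AllocationService strategy, ClusterState clusterState) {
            ClusterState newState = strategy.reroute(clusterState, "reroute");
            return newState.equals(clusterState) ? clusterState : newState;
        }
    }

The same shape replaces the old prevRoutingTable/routingTable bookkeeping in every test file touched below.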
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java
index df169e3b89..454e841048 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java
@@ -21,9 +21,11 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
@@ -33,11 +35,12 @@ import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.util.Arrays;
+import java.util.Collections;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
public class StartedShardsRoutingTests extends ESAllocationTestCase {
@@ -45,9 +48,11 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase {
AllocationService allocation = createAllocationService();
logger.info("--> building initial cluster state");
+ AllocationId allocationId = AllocationId.newRelocation(AllocationId.newInitializing());
final IndexMetaData indexMetaData = IndexMetaData.builder("test")
.settings(settings(Version.CURRENT))
.numberOfShards(2).numberOfReplicas(0)
+ .putInSyncAllocationIds(1, Collections.singleton(allocationId.getId()))
.build();
final Index index = indexMetaData.getIndex();
ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
@@ -55,7 +60,7 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase {
.metaData(MetaData.builder().put(indexMetaData, false));
final ShardRouting initShard = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.INITIALIZING);
- final ShardRouting relocatingShard = TestShardRouting.newShardRouting(new ShardId(index, 1), "node1", "node2", true, ShardRoutingState.RELOCATING);
+ final ShardRouting relocatingShard = TestShardRouting.newShardRouting(new ShardId(index, 1), "node1", "node2", true, ShardRoutingState.RELOCATING, allocationId);
stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index)
.addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).build())
.addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).build())).build());
@@ -64,16 +69,18 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase {
logger.info("--> test starting of shard");
- RoutingAllocation.Result result = allocation.applyStartedShards(state, Arrays.asList(initShard), false);
- assertTrue("failed to start " + initShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed());
- assertTrue(initShard + "isn't started \ncurrent routing table:" + result.routingTable().prettyPrint(),
- result.routingTable().index("test").shard(initShard.id()).allShardsStarted());
-
+ ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(initShard));
+ assertThat("failed to start " + initShard + "\ncurrent routing table:" + newState.routingTable().prettyPrint(),
+ newState, not(equalTo(state)));
+ assertTrue(initShard + "isn't started \ncurrent routing table:" + newState.routingTable().prettyPrint(),
+ newState.routingTable().index("test").shard(initShard.id()).allShardsStarted());
+ state = newState;
logger.info("--> testing starting of relocating shards");
- result = allocation.applyStartedShards(state, Arrays.asList(relocatingShard.getTargetRelocatingShard()), false);
- assertTrue("failed to start " + relocatingShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed());
- ShardRouting shardRouting = result.routingTable().index("test").shard(relocatingShard.id()).getShards().get(0);
+ newState = allocation.applyStartedShards(state, Arrays.asList(relocatingShard.getTargetRelocatingShard()));
+ assertThat("failed to start " + relocatingShard + "\ncurrent routing table:" + newState.routingTable().prettyPrint(),
+ newState, not(equalTo(state)));
+ ShardRouting shardRouting = newState.routingTable().index("test").shard(relocatingShard.id()).getShards().get(0);
assertThat(shardRouting.state(), equalTo(ShardRoutingState.STARTED));
assertThat(shardRouting.currentNodeId(), equalTo("node2"));
assertThat(shardRouting.relocatingNodeId(), nullValue());
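
StartedShardsRoutingTests additionally threads an explicit AllocationId through the relocating shard so that its id can be registered in the index's in-sync set, which the reworked allocation code now consults before starting the relocation target. A hedged illustration of what that id carries (both accessors exist on AllocationId; the wrapper class is invented for the sketch):

    import org.elasticsearch.cluster.routing.AllocationId;

    final class AllocationIdSketch {
        static void demo() {
            // A relocation AllocationId pairs the id of the copy that is moving
            // with the id its relocation target adopts once it starts.
            AllocationId allocationId = AllocationId.newRelocation(AllocationId.newInitializing());
            String inSyncId = allocationId.getId();              // what the test feeds to putInSyncAllocationIds
            String relocationTargetId = allocationId.getRelocationId();
        }
    }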
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
index 556c97a6ef..5907232b5f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
@@ -19,18 +19,18 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@@ -38,13 +38,14 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
/**
*
*/
public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class);
+ private final Logger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class);
public void testSingleIndexFirstStartPrimaryThenBackups() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -63,103 +64,95 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(10));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(10));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ ClusterState newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
}
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the primary shard (on node1)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(10));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
// backup shards are initializing as well; we make sure that they recover from primary *started* shards in the IndicesClusterStateService
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
}
logger.info("Reroute, nothing should change");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- assertThat(prevRoutingTable == routingTable, equalTo(true));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
logger.info("Start the backup shard");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(10));
- for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
- assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
- assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
+ assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
}
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(10));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10));
logger.info("Add another node and perform rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(10));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(10));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
@@ -168,13 +161,12 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase {
logger.info("Start the shards on node 3");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node3").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ newState = strategy.applyStartedShards(clusterState, routingNodes.node("node3").shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(10));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(7));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(7));
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(6));
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
index b94aec95ca..894b5b42f0 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
@@ -20,23 +20,35 @@
package org.elasticsearch.cluster.routing.allocation;
import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingHelper;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
+import java.util.Collections;
+
+import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -47,13 +59,15 @@ import static org.hamcrest.Matchers.equalTo;
*
*/
public class ThrottlingAllocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(ThrottlingAllocationTests.class);
+ private final Logger logger = Loggers.getLogger(ThrottlingAllocationTests.class);
public void testPrimaryRecoveryThrottling() {
+
+ TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
AllocationService strategy = createAllocationService(Settings.builder()
.put("cluster.routing.allocation.node_concurrent_recoveries", 3)
.put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
- .build());
+ .build(), gatewayAllocator);
logger.info("Building initial routing table");
@@ -61,58 +75,53 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
.build();
- RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test"));
-
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator);
logger.info("start one node, do reroute, only 3 should initialize");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(17));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(17));
logger.info("start initializing, another 3 should initialize");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(14));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(14));
logger.info("start initializing, another 3 should initialize");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(6));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(11));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(11));
logger.info("start initializing, another 1 should initialize");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(9));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(10));
logger.info("start initializing, all primaries should be started");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(10));
}
public void testReplicaAndPrimaryRecoveryThrottling() {
+ TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
AllocationService strategy = createAllocationService(Settings.builder()
.put("cluster.routing.allocation.node_concurrent_recoveries", 3)
.put("cluster.routing.allocation.concurrent_source_recoveries", 3)
.put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
- .build());
+ .build(),
+ gatewayAllocator);
logger.info("Building initial routing table");
@@ -120,120 +129,104 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();
- RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test"));
+ ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator);
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ logger.info("with one node, do reroute, only 3 should initialize");
+ clusterState = strategy.reroute(clusterState, "reroute");
- logger.info("start one node, do reroute, only 3 should initialize");
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(7));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(7));
logger.info("start initializing, another 2 should initialize");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(5));
logger.info("start initializing, all primaries should be started");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(5));
logger.info("start another node, replicas should start being allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(2));
logger.info("start initializing replicas");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0));
logger.info("start initializing replicas, all should be started");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0));
}
public void testThrottleIncomingAndOutgoing() {
+ TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
Settings settings = Settings.builder()
.put("cluster.routing.allocation.node_concurrent_recoveries", 5)
.put("cluster.routing.allocation.node_initial_primaries_recoveries", 5)
.put("cluster.routing.allocation.cluster_concurrent_rebalance", 5)
.build();
- AllocationService strategy = createAllocationService(settings);
+ AllocationService strategy = createAllocationService(settings, gatewayAllocator);
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(9).numberOfReplicas(0))
.build();
- RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test"));
-
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator);
- logger.info("start one node, do reroute, only 5 should initialize");
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(4));
+ logger.info("with one node, do reroute, only 5 should initialize");
+ clusterState = strategy.reroute(clusterState, "reroute");
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(4));
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 5);
logger.info("start initializing, all primaries should be started");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(4));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(4));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0));
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
logger.info("start another 2 nodes, 5 shards should be relocating - at most 5 are allowed per node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2")).add(newNode("node3"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(4));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(4));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(5));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0));
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node2"), 3);
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node3"), 2);
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 0);
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 5);
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
logger.info("start the relocating shards, one more shard should relocate away from node1");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0));
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node2"), 0);
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node3"), 1);
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 0);
@@ -241,9 +234,10 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
}
public void testOutgoingThrottlesAllocation() {
+ TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
AllocationService strategy = createAllocationService(Settings.builder()
.put("cluster.routing.allocation.node_concurrent_outgoing_recoveries", 1)
- .build());
+ .build(), gatewayAllocator);
logger.info("Building initial routing table");
@@ -251,96 +245,129 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
.build();
- RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test"));
-
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator);
- logger.info("start one node, do reroute, only 1 should initialize");
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("with one node, do reroute, only 1 should initialize");
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(2));
logger.info("start initializing");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(2));
logger.info("start one more node, first non-primary should start being allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(1));
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1);
logger.info("start initializing non-primary");
- routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(1));
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(1));
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0);
logger.info("start one more node, initializing second non-primary");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0));
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1);
logger.info("start one more node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1);
logger.info("move started non-primary to new node");
- RoutingAllocation.Result reroute = strategy.reroute(clusterState, new AllocationCommands(
+ AllocationService.CommandsResult commandsResult = strategy.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand("test", 0, "node2", "node4")), true, false);
- assertEquals(reroute.explanations().explanations().size(), 1);
- assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE);
+ assertEquals(commandsResult.explanations().explanations().size(), 1);
+ assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE);
// even though it is throttled, move command still forces allocation
- clusterState = ClusterState.builder(clusterState).routingResult(reroute).build();
- routingTable = clusterState.routingTable();
- assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(1));
- assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
- assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ clusterState = commandsResult.getClusterState();
+ assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0));
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 2);
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0);
}
- private RoutingTable createRecoveryRoutingTable(IndexMetaData indexMetaData) {
+ private ClusterState createRecoveryStateAndInitializeAllocations(MetaData metaData, TestGatewayAllocator gatewayAllocator) {
+ DiscoveryNode node1 = newNode("node1");
+ MetaData.Builder metaDataBuilder = new MetaData.Builder(metaData);
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
- switch (randomInt(5)) {
- case 0: routingTableBuilder.addAsRecovery(indexMetaData); break;
- case 1: routingTableBuilder.addAsFromCloseToOpen(indexMetaData); break;
- case 2: routingTableBuilder.addAsFromDangling(indexMetaData); break;
- case 3: routingTableBuilder.addAsNewRestore(indexMetaData,
- new RestoreSource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT,
- indexMetaData.getIndex().getName()), new IntHashSet()); break;
- case 4: routingTableBuilder.addAsRestore(indexMetaData,
- new RestoreSource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT,
- indexMetaData.getIndex().getName())); break;
- case 5: routingTableBuilder.addAsNew(indexMetaData); break;
- default: throw new IndexOutOfBoundsException();
+ for (ObjectCursor<IndexMetaData> cursor: metaData.indices().values()) {
+ Index index = cursor.value.getIndex();
+ IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(cursor.value);
+ final int recoveryType = randomInt(5);
+ if (recoveryType <= 4) {
+ addInSyncAllocationIds(index, indexMetaDataBuilder, gatewayAllocator, node1);
+ }
+ IndexMetaData indexMetaData = indexMetaDataBuilder.build();
+ metaDataBuilder.put(indexMetaData, false);
+ switch (recoveryType) {
+ case 0:
+ routingTableBuilder.addAsRecovery(indexMetaData);
+ break;
+ case 1:
+ routingTableBuilder.addAsFromCloseToOpen(indexMetaData);
+ break;
+ case 2:
+ routingTableBuilder.addAsFromDangling(indexMetaData);
+ break;
+ case 3:
+ routingTableBuilder.addAsNewRestore(indexMetaData,
+ new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT,
+ indexMetaData.getIndex().getName()), new IntHashSet());
+ break;
+ case 4:
+ routingTableBuilder.addAsRestore(indexMetaData,
+ new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT,
+ indexMetaData.getIndex().getName()));
+ break;
+ case 5:
+ routingTableBuilder.addAsNew(indexMetaData);
+ break;
+ default:
+ throw new IndexOutOfBoundsException();
+ }
}
-
- return routingTableBuilder.build();
+ return ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+ .nodes(DiscoveryNodes.builder().add(node1))
+ .metaData(metaDataBuilder.build())
+ .routingTable(routingTableBuilder.build()).build();
}
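+ /**
+  * Simulates a copy of every shard of the index having been started on node1: marks its
+  * allocation id as in-sync in the index metadata and registers it with the gateway
+  * allocator as a known shard copy.
+  */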
+ private void addInSyncAllocationIds(Index index, IndexMetaData.Builder indexMetaData,
+ TestGatewayAllocator gatewayAllocator, DiscoveryNode node1) {
+ for (int shard = 0; shard < indexMetaData.numberOfShards(); shard++) {
+ final boolean primary = randomBoolean();
+ final ShardRouting unassigned = ShardRouting.newUnassigned(new ShardId(index, shard), primary,
+ primary ?
+ RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE :
+ RecoverySource.PeerRecoverySource.INSTANCE,
+ new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test")
+ );
+ ShardRouting started = ShardRoutingHelper.moveToStarted(ShardRoutingHelper.initialize(unassigned, node1.getId()));
+ indexMetaData.putInSyncAllocationIds(shard, Collections.singleton(started.allocationId().getId()));
+ gatewayAllocator.addKnownAllocation(started);
+ }
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java
new file mode 100644
index 0000000000..412cc3322f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Unit tests for the {@link UnassignedShardDecision} class.
+ */
+public class UnassignedShardDecisionTests extends ESTestCase {
+
+ public void testDecisionNotTaken() {
+ UnassignedShardDecision unassignedShardDecision = UnassignedShardDecision.DECISION_NOT_TAKEN;
+ assertFalse(unassignedShardDecision.isDecisionTaken());
+ assertNull(unassignedShardDecision.getFinalDecision());
+ assertNull(unassignedShardDecision.getAllocationStatus());
+ assertNull(unassignedShardDecision.getAllocationId());
+ assertNull(unassignedShardDecision.getAssignedNodeId());
+ assertNull(unassignedShardDecision.getFinalExplanation());
+ assertNull(unassignedShardDecision.getNodeDecisions());
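+ // the *Safe accessors must reject access while no decision has been taken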
+ expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalDecisionSafe());
+ expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalExplanationSafe());
+ }
+
+ public void testNoDecision() {
+ final AllocationStatus allocationStatus = randomFrom(
+ AllocationStatus.DELAYED_ALLOCATION, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA
+ );
+ UnassignedShardDecision noDecision = UnassignedShardDecision.noDecision(allocationStatus, "something is wrong");
+ assertTrue(noDecision.isDecisionTaken());
+ assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type());
+ assertEquals(allocationStatus, noDecision.getAllocationStatus());
+ assertEquals("something is wrong", noDecision.getFinalExplanation());
+ assertNull(noDecision.getNodeDecisions());
+ assertNull(noDecision.getAssignedNodeId());
+ assertNull(noDecision.getAllocationId());
+
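+ // a NO decision can also carry the per-node decisions that led to it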
+ Map<String, Decision> nodeDecisions = new HashMap<>();
+ nodeDecisions.put("node1", Decision.NO);
+ nodeDecisions.put("node2", Decision.NO);
+ noDecision = UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, "something is wrong", nodeDecisions);
+ assertTrue(noDecision.isDecisionTaken());
+ assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type());
+ assertEquals(AllocationStatus.DECIDERS_NO, noDecision.getAllocationStatus());
+ assertEquals("something is wrong", noDecision.getFinalExplanation());
+ assertEquals(nodeDecisions, noDecision.getNodeDecisions());
+ assertNull(noDecision.getAssignedNodeId());
+ assertNull(noDecision.getAllocationId());
+
+ // test bad values
+ expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(null, "a"));
+ expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, null));
+ }
+
+ public void testThrottleDecision() {
+ Map<String, Decision> nodeDecisions = new HashMap<>();
+ nodeDecisions.put("node1", Decision.NO);
+ nodeDecisions.put("node2", Decision.THROTTLE);
+ UnassignedShardDecision throttleDecision = UnassignedShardDecision.throttleDecision("too much happening", nodeDecisions);
+ assertTrue(throttleDecision.isDecisionTaken());
+ assertEquals(Decision.Type.THROTTLE, throttleDecision.getFinalDecision().type());
+ assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus());
+ assertEquals("too much happening", throttleDecision.getFinalExplanation());
+ assertEquals(nodeDecisions, throttleDecision.getNodeDecisions());
+ assertNull(throttleDecision.getAssignedNodeId());
+ assertNull(throttleDecision.getAllocationId());
+
+ // test bad values
+ expectThrows(NullPointerException.class, () -> UnassignedShardDecision.throttleDecision(null, Collections.emptyMap()));
+ }
+
+ public void testYesDecision() {
+ Map<String, Decision> nodeDecisions = new HashMap<>();
+ nodeDecisions.put("node1", Decision.YES);
+ nodeDecisions.put("node2", Decision.NO);
+ String allocId = randomBoolean() ? "allocId" : null;
+ UnassignedShardDecision yesDecision = UnassignedShardDecision.yesDecision(
+ "node was very kind", "node1", allocId, nodeDecisions
+ );
+ assertTrue(yesDecision.isDecisionTaken());
+ assertEquals(Decision.Type.YES, yesDecision.getFinalDecision().type());
+ assertNull(yesDecision.getAllocationStatus());
+ assertEquals("node was very kind", yesDecision.getFinalExplanation());
+ assertEquals(nodeDecisions, yesDecision.getNodeDecisions());
+ assertEquals("node1", yesDecision.getAssignedNodeId());
+ assertEquals(allocId, yesDecision.getAllocationId());
+
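+ // the explanation and the assigned node id are mandatory for a YES decision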
+ expectThrows(NullPointerException.class,
+ () -> UnassignedShardDecision.yesDecision(null, "a", randomBoolean() ? "a" : null, Collections.emptyMap()));
+ expectThrows(NullPointerException.class,
+ () -> UnassignedShardDecision.yesDecision("a", null, null, Collections.emptyMap()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
index 103e902738..e711354b18 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
@@ -19,17 +19,17 @@
package org.elasticsearch.cluster.routing.allocation;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class UpdateNumberOfReplicasTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class);
+ private final Logger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class);
public void testUpdateNumberOfReplicas() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -54,127 +54,114 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
- assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
- assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+ assertThat(initialRoutingTable.index("test").shards().size(), equalTo(1));
+ assertThat(initialRoutingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(initialRoutingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(initialRoutingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(initialRoutingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(initialRoutingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(initialRoutingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start all the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("Start all the replica shards");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
- final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
- final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
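+ // starting the replicas must produce a changed cluster state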
+ ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ final String nodeHoldingPrimary = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ final String nodeHoldingReplica = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
logger.info("add another replica");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(2).build();
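+ // raise the replica count in both the routing table and the metadata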
+ RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(2).build();
metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(2).build();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metaData(metaData).build();
assertThat(clusterState.metaData().index("test").getNumberOfReplicas(), equalTo(2));
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
logger.info("Add another node and start the added replica");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica));
- assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
- assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+ newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ assertThat(newState, not(equalTo(clusterState)));
+ clusterState = newState;
+
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
logger.info("now remove a replica");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(1).build();
+ updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(1).build();
metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metaData(metaData).build();
assertThat(clusterState.metaData().index("test").getNumberOfReplicas(), equalTo(1));
- assertThat(prevRoutingTable != routingTable, equalTo(true));
- assertThat(routingTable.index("test").shards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
- assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
- assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
- assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+ assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
logger.info("do a reroute, should remain the same");
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
- assertThat(prevRoutingTable != routingTable, equalTo(false));
+ newState = strategy.reroute(clusterState, "reroute");
+ assertThat(newState, equalTo(clusterState));
}
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
index e9c82e7e18..062a018a82 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -49,8 +50,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.test.ESAllocationTestCase;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.Arrays;
import java.util.HashMap;
@@ -65,11 +65,10 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
public class DiskThresholdDeciderTests extends ESAllocationTestCase {
-
+
DiskThresholdDecider makeDecider(Settings settings) {
return new DiskThresholdDecider(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
@@ -114,35 +113,32 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ final RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData)
- .routingTable(routingTable).build();
+ .routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1"))
.add(newNode("node2"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Primary shard should be initializing, replica should not
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
// Assert that we're able to start the primary
@@ -151,9 +147,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
// Assert that the replica couldn't be started since node1 doesn't have enough space
@@ -164,8 +158,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Assert that the replica is initialized now that node3 is available with enough space
@@ -173,9 +166,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
// Assert that the replica couldn't be started since node1 doesn't have enough space
@@ -203,10 +194,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Shards remain started
@@ -234,10 +224,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Shards remain started
@@ -252,8 +241,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Shards remain started
@@ -261,9 +249,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> apply INITIALIZING shards");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
@@ -315,18 +301,18 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData)
- .routingTable(routingTable).build();
+ .routingTable(initialRoutingTable).build();
logger.info("--> adding node1 and node2 node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
@@ -334,8 +320,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.add(newNode("node2"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
 // Primary should initialize, even though both nodes are over the limit
@@ -373,19 +358,16 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Now the replica should be able to initialize
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
// Assert that we're able to start the primary and replica, since they were both initializing
@@ -403,8 +385,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node3"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Assert that the replica is initialized now that node3 is available with enough space
@@ -412,9 +393,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
// Assert that all replicas could be started
@@ -442,10 +421,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Shards remain started
@@ -473,10 +451,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Shards remain started
@@ -491,8 +468,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node4"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Shards remain started
@@ -502,9 +478,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> apply INITIALIZING shards");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
 // primary shard has already been relocated away
@@ -519,8 +493,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(newNode("node5"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// Shards remain started on node3 and node4
@@ -531,9 +504,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> apply INITIALIZING shards");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> final cluster state:");
logShardStates(clusterState);
@@ -584,7 +555,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
@@ -654,7 +625,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
@@ -737,10 +708,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
+ DiskThresholdDecider decider = makeDecider(diskSettings);
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
- new HashSet<>(Arrays.asList(
- new SameShardAllocationDecider(Settings.EMPTY),
- makeDecider(diskSettings))));
+ new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY), decider)));
ClusterInfoService cis = new ClusterInfoService() {
@Override
@@ -759,37 +729,34 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData)
- .routingTable(routingTable).build();
+ .routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1"))
.add(newNode("node2"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// shards should be initializing
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
logger.info("--> start the shards");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
- .routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
// Assert that we're able to start the primary and replicas
@@ -803,8 +770,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
AllocationCommand relocate1 = new MoveAllocationCommand("test", 0, "node2", "node3");
AllocationCommands cmds = new AllocationCommands(relocate1);
- routingTable = strategy.reroute(clusterState, cmds, false, false).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, cmds, false, false).getClusterState();
logShardStates(clusterState);
AllocationCommand relocate2 = new MoveAllocationCommand("test2", 0, "node2", "node3");
@@ -815,7 +781,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
// node3, which will put it over the low watermark when it
// completes, with shard relocations taken into account this should
// throw an exception about not being able to complete
- strategy.reroute(clusterState, cmds, false, false).routingTable();
+ strategy.reroute(clusterState, cmds, false, false);
fail("should not have been able to reroute the shard");
} catch (IllegalArgumentException e) {
assertThat("can't allocated because there isn't enough room: " + e.getMessage(),
@@ -841,6 +807,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
shardSizesBuilder.put("[test][0][p]", 40L);
shardSizesBuilder.put("[test][1][p]", 40L);
+ shardSizesBuilder.put("[foo][0][p]", 10L);
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
@@ -848,10 +815,12 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
+ .put(IndexMetaData.builder("foo").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
+ .addAsNew(metaData.index("foo"))
.build();
DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", new LocalTransportAddress("1"), emptyMap(),
@@ -862,13 +831,13 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
- .routingTable(routingTable)
+ .routingTable(initialRoutingTable)
.nodes(discoveryNodes)
.build();
 // Two shards each consuming 80% of disk space while 70% is allowed, so shard 0 isn't allowed here
- ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, null, true, ShardRoutingState.STARTED);
- ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", null, null, true, ShardRoutingState.STARTED);
+ ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
+ ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED);
RoutingNode firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
RoutingTable.Builder builder = RoutingTable.builder().add(
IndexRoutingTable.builder(firstRouting.index())
@@ -888,8 +857,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
assertThat(decision.type(), equalTo(Decision.Type.NO));
 // Two shards each consuming 80% of disk space while 70% is allowed, but one is relocating, so shard 0 can stay
- firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, null, true, ShardRoutingState.STARTED);
- secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", "node2", null, true, ShardRoutingState.RELOCATING);
+ firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
+ secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", "node2", true, ShardRoutingState.RELOCATING);
+ ShardRouting fooRouting = TestShardRouting.newShardRouting("foo", 0, "node1", null, true, ShardRoutingState.UNASSIGNED);
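+ // fooRouting remains unassigned; it is used below to verify that allocating to the crowded node is rejected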
firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
builder = RoutingTable.builder().add(
IndexRoutingTable.builder(firstRouting.index())
@@ -907,6 +877,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
false);
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));
+ decision = diskThresholdDecider.canAllocate(fooRouting, firstRoutingNode, routingAllocation);
+ assertThat(decision.type(), equalTo(Decision.Type.NO));
// Creating AllocationService instance and the services it depends on...
ClusterInfoService cis = new ClusterInfoService() {
@@ -928,11 +900,11 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
// Ensure that the reroute call doesn't alter the routing table, since the first primary is relocating away
 // and therefore we will have sufficient disk space on node1.
- RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute");
- assertThat(result.changed(), is(false));
+ ClusterState result = strategy.reroute(clusterState, "reroute");
+ assertThat(result, equalTo(clusterState));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node1"));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
@@ -965,7 +937,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
@@ -978,13 +950,13 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
- .routingTable(routingTable)
+ .routingTable(initialRoutingTable)
.nodes(discoveryNodes)
.build();
 // Two shards consume 80% of disk space on the data node, but since there is only one data node the shards should remain.
- ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, null, true, ShardRoutingState.STARTED);
- ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", null, null, true, ShardRoutingState.STARTED);
+ ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, true, ShardRoutingState.STARTED);
+ ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", null, true, ShardRoutingState.STARTED);
RoutingNode firstRoutingNode = new RoutingNode("node2", discoveryNode2, firstRouting, secondRouting);
RoutingTable.Builder builder = RoutingTable.builder().add(
@@ -1026,8 +998,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
- .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis);
- RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute");
+ .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
+ ClusterState result = strategy.reroute(clusterState, "reroute");
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node2"));
@@ -1043,8 +1015,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.add(discoveryNode3)).build();
- firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, null, true, ShardRoutingState.STARTED);
- secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", null, true, ShardRoutingState.RELOCATING);
+ firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, true, ShardRoutingState.STARTED);
+ secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", true, ShardRoutingState.RELOCATING);
firstRoutingNode = new RoutingNode("node2", discoveryNode2, firstRouting, secondRouting);
builder = RoutingTable.builder().add(
IndexRoutingTable.builder(firstRouting.index())
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java
index 56c7d69c59..5e8f341527 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java
@@ -21,15 +21,17 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterInfo;
-import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.DiskUsage;
-import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -42,10 +44,8 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.util.Arrays;
import java.util.Collections;
@@ -53,7 +53,6 @@ import java.util.HashSet;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
-import static org.hamcrest.CoreMatchers.equalTo;
/**
* Unit tests for the DiskThresholdDecider
@@ -70,7 +69,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
final Index index = metaData.index("test").getIndex();
- ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
DiscoveryNode node_0 = new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
DiscoveryNode node_1 = new DiscoveryNode("node_1", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
@@ -99,7 +98,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
shardSizes.put("[test][0][p]", 10L); // 10 bytes
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of());
- RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);
+ RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);
assertEquals(mostAvailableUsage.toString(), Decision.YES, decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(mostAvailableUsage.toString(), Decision.NO, decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation));
}
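AllocationDeciders is likewise constructed from a Collection now instead of a raw array, which lets single-decider tests use Collections.singleton:

    // Before: new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider})
    // After: any Collection works; a singleton reads cleanly for one decider.
    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider));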
@@ -119,22 +118,22 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
.build();
final IndexMetaData indexMetaData = metaData.index("test");
- ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_0 = ShardRoutingHelper.initialize(test_0, node_0.getId());
test_0 = ShardRoutingHelper.moveToStarted(test_0);
shardRoutingMap.put(test_0, "/node0/least");
- ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_1 = ShardRoutingHelper.initialize(test_1, node_1.getId());
test_1 = ShardRoutingHelper.moveToStarted(test_1);
shardRoutingMap.put(test_1, "/node1/least");
- ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_2 = ShardRoutingHelper.initialize(test_2, node_1.getId());
test_2 = ShardRoutingHelper.moveToStarted(test_2);
shardRoutingMap.put(test_2, "/node1/most");
- ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_3 = ShardRoutingHelper.initialize(test_3, node_1.getId());
test_3 = ShardRoutingHelper.moveToStarted(test_3);
// Intentionally not in the shardRoutingMap. We want to test what happens when we don't know where it is.
@@ -166,7 +165,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
shardSizes.put("[test][2][p]", 10L);
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build());
- RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);
+ RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);
assertEquals(Decision.YES, decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(Decision.NO, decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation));
try {
@@ -207,17 +206,17 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
RoutingAllocation allocation = new RoutingAllocation(null, null, clusterState, info, 0, false);
final Index index = new Index("test", "1234");
- ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_0 = ShardRoutingHelper.initialize(test_0, "node1");
test_0 = ShardRoutingHelper.moveToStarted(test_0);
test_0 = ShardRoutingHelper.relocate(test_0, "node2");
- ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_1 = ShardRoutingHelper.initialize(test_1, "node2");
test_1 = ShardRoutingHelper.moveToStarted(test_1);
test_1 = ShardRoutingHelper.relocate(test_1, "node1");
- ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(index, 2), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(index, 2), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_2 = ShardRoutingHelper.initialize(test_2, "node1");
test_2 = ShardRoutingHelper.moveToStarted(test_2);
@@ -232,13 +231,13 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
assertEquals(0L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/some/other/dev"));
assertEquals(0L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/some/other/dev"));
- ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(index, 3), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(index, 3), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_3 = ShardRoutingHelper.initialize(test_3, "node1");
test_3 = ShardRoutingHelper.moveToStarted(test_3);
assertEquals(0L, DiskThresholdDecider.getExpectedShardSize(test_3, allocation, 0));
- ShardRouting other_0 = ShardRouting.newUnassigned(new ShardId("other", "5678", 0), null, randomBoolean(), new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting other_0 = ShardRouting.newUnassigned(new ShardId("other", "5678", 0), randomBoolean(), PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
other_0 = ShardRoutingHelper.initialize(other_0, "node2");
other_0 = ShardRoutingHelper.moveToStarted(other_0);
other_0 = ShardRoutingHelper.relocate(other_0, "node1");
@@ -280,32 +279,30 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
AllocationService allocationService = createAllocationService();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
- RoutingAllocation.Result result = allocationService.reroute(clusterState, "foo");
- clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build();
+ clusterState = allocationService.reroute(clusterState, "foo");
- result = allocationService.applyStartedShards(clusterState,
+ clusterState = allocationService.applyStartedShards(clusterState,
clusterState.getRoutingTable().index("test").shardsWithState(ShardRoutingState.UNASSIGNED));
- clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build();
RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, info, 0, false);
final Index index = new Index("test", "1234");
- ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), null, true,
- new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true,
+ LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_0 = ShardRoutingHelper.initialize(test_0, "node1");
test_0 = ShardRoutingHelper.moveToStarted(test_0);
- ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), null, true,
- new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), true,
+ LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_1 = ShardRoutingHelper.initialize(test_1, "node2");
test_1 = ShardRoutingHelper.moveToStarted(test_1);
- ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(index, 2), null, true,
- new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(index, 2), true,
+ LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_2 = ShardRoutingHelper.initialize(test_2, "node1");
- ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(index, 3), null, true,
- new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(index, 3), true,
+ LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_3 = ShardRoutingHelper.initialize(test_3, "node1");
assertEquals(500L, DiskThresholdDecider.getExpectedShardSize(test_3, allocation, 0));
assertEquals(500L, DiskThresholdDecider.getExpectedShardSize(test_2, allocation, 0));
@@ -314,15 +311,15 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
ShardRouting target = ShardRouting.newUnassigned(new ShardId(new Index("target", "5678"), 0),
- null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
assertEquals(1110L, DiskThresholdDecider.getExpectedShardSize(target, allocation, 0));
ShardRouting target2 = ShardRouting.newUnassigned(new ShardId(new Index("target2", "9101112"), 0),
- null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
assertEquals(110L, DiskThresholdDecider.getExpectedShardSize(target2, allocation, 0));
target2 = ShardRouting.newUnassigned(new ShardId(new Index("target2", "9101112"), 1),
- null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+ true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
assertEquals(1000L, DiskThresholdDecider.getExpectedShardSize(target2, allocation, 0));
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
index e880b09806..4230504f53 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
@@ -20,8 +20,10 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -30,11 +32,9 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.util.EnumSet;
import java.util.List;
@@ -51,7 +51,7 @@ import static org.hamcrest.Matchers.equalTo;
*/
public class EnableAllocationTests extends ESAllocationTestCase {
- private final ESLogger logger = Loggers.getLogger(EnableAllocationTests.class);
+ private final Logger logger = Loggers.getLogger(EnableAllocationTests.class);
public void testClusterEnableNone() {
AllocationService strategy = createAllocationService(Settings.builder()
@@ -125,27 +125,24 @@ public class EnableAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("enabled").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("disabled"))
.addAsNew(metaData.index("enabled"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1"))
.add(newNode("node2"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> start the shards (replicas)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logger.info("--> verify only enabled index has been routed");
assertThat(clusterState.getRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2));
@@ -170,29 +167,26 @@ public class EnableAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("always_disabled").settings(settings(Version.CURRENT).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.addAsNew(metaData.index("always_disabled"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding one nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1"))
.add(newNode("node2"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(8));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
@@ -202,29 +196,24 @@ public class EnableAllocationTests extends ESAllocationTestCase {
.add(newNode("node2"))
.add(newNode("node3"))
).build();
- ClusterState prevState = clusterState;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(8));
assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(0));
if (useClusterSetting) {
- prevState = clusterState;
- clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(Settings.builder()
+ clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()).transientSettings(Settings.builder()
.put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes)
.build())).build();
} else {
- prevState = clusterState;
IndexMetaData meta = clusterState.getMetaData().index("test");
IndexMetaData meta1 = clusterState.getMetaData().index("always_disabled");
- clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices().put(IndexMetaData.builder(meta1))
+ clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()).removeAllIndices().put(IndexMetaData.builder(meta1))
.put(IndexMetaData.builder(meta).settings(Settings.builder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes).build())))
.build();
}
clusterSettings.applySettings(clusterState.metaData().settings());
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6));
assertThat("expected 2 shards to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(2));
List<ShardRouting> mutableShardRoutings = clusterState.getRoutingNodes().shardsWithState(RELOCATING);
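The change from MetaData.builder(metaData) to MetaData.builder(clusterState.metaData()) in this hunk is a correctness fix, not just cleanup: the metaData local was captured before the reroutes above and can be stale, while the current cluster state carries the authoritative copy. The pattern, as used in the cluster-setting branch:

    // Derive the new metadata from the *current* state, not a variable
    // captured before earlier reroutes changed the cluster state.
    clusterState = ClusterState.builder(clusterState)
            .metaData(MetaData.builder(clusterState.metaData())
                    .transientSettings(Settings.builder()
                            .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes)
                            .build()))
            .build();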
@@ -249,8 +238,7 @@ public class EnableAllocationTests extends ESAllocationTestCase {
default:
fail("only replicas, primaries or all are allowed");
}
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(8));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
@@ -271,23 +259,21 @@ public class EnableAllocationTests extends ESAllocationTestCase {
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(6).numberOfReplicas(0))
.build();
- RoutingTable routingTable = RoutingTable.builder()
+ RoutingTable initialRoutingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
- ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding one nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.add(newNode("node1"))
.add(newNode("node2"))
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(6));
logger.info("--> start the shards (primaries)");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6));
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
@@ -297,25 +283,21 @@ public class EnableAllocationTests extends ESAllocationTestCase {
.add(newNode("node2"))
.add(newNode("node3"))
).build();
- ClusterState prevState = clusterState;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6));
assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(0));
+ metaData = clusterState.metaData();
if (useClusterSetting) {
- prevState = clusterState;
clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(Settings.builder()
.put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL)
.build())).build();
} else {
- prevState = clusterState;
IndexMetaData meta = clusterState.getMetaData().index("test");
clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices()
.put(IndexMetaData.builder(meta).settings(Settings.builder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL).build()))).build();
}
clusterSettings.applySettings(clusterState.metaData().settings());
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4));
assertThat("expected 2 primaries to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(2));
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
index 5f7e8bbfa2..3259598f69 100644
--- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -30,7 +30,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import static org.hamcrest.Matchers.equalTo;
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
index a0b6f7f040..7c11e2b8c2 100644
--- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
@@ -30,7 +30,7 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java
index b7eb532e10..b8527872d7 100644
--- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.cluster.service;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
@@ -125,7 +127,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
@Override
public void onFailure(String source, Exception e) {
- logger.error("failed to execute callback in test {}", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
onFailure.set(true);
latch.countDown();
}
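With the move to Log4j 2 (note the new ParameterizedMessage and Supplier imports above), the old ESLogger overload taking a message, a throwable, and trailing format arguments is gone; the message is now supplied lazily and the throwable goes last. A sketch of the replacement pattern used throughout this commit:

    // Before (old ESLogger overload): logger.error("failed ... {}", e, source);
    // After (Log4j 2): build the message lazily; pass the exception last.
    logger.error((Supplier<?>) () -> new ParameterizedMessage(
            "failed to execute callback in test {}", source), e);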
@@ -196,7 +198,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
@Override
public void onFailure(String source, Exception e) {
- logger.error("failed to execute callback in test {}", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
onFailure.set(true);
latch.countDown();
}
@@ -270,7 +272,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
@Override
public void onFailure(String source, Exception e) {
- logger.error("failed to execute callback in test {}", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
onFailure.set(true);
latch.countDown();
}
@@ -344,7 +346,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
@Override
public void onFailure(String source, Exception e) {
- logger.error("failed to execute callback in test {}", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
onFailure.set(true);
latch.countDown();
}
@@ -361,7 +363,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
}
- @TestLogging("_root:debug,action.admin.cluster.tasks:trace")
+ @TestLogging("_root:debug,org.elasticsearch.action.admin.cluster.tasks:trace")
public void testPendingUpdateTask() throws Exception {
Settings settings = Settings.builder()
.put("discovery.type", "local")
diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java
index 1002774d2c..af5dc422e6 100644
--- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java
@@ -18,8 +18,11 @@
*/
package org.elasticsearch.cluster.service;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -35,6 +38,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
@@ -69,9 +73,7 @@ import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
-import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.anyOf;
-import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
@@ -331,7 +333,7 @@ public class ClusterServiceTests extends ESTestCase {
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
- logger.error("unexpected failure: [{}]", e, source);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure: [{}]", source), e);
failures.add(new Tuple<>(source, e));
updateLatch.countDown();
}
@@ -675,18 +677,30 @@ public class ClusterServiceTests extends ESTestCase {
latch.await();
}
- @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
+ @TestLogging("org.elasticsearch.cluster.service:TRACE") // To ensure that we log cluster state events on TRACE level
public void testClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
- mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG,
- "*processing [test1]: took [1s] no change in cluster_state"));
- mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE,
- "*failed to execute cluster state update in [2s]*"));
- mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG,
- "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
-
- Logger rootLogger = Logger.getRootLogger();
- rootLogger.addAppender(mockAppender);
+ mockAppender.addExpectation(
+ new MockLogAppender.SeenEventExpectation(
+ "test1",
+ "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+ Level.DEBUG,
+ "*processing [test1]: took [1s] no change in cluster_state"));
+ mockAppender.addExpectation(
+ new MockLogAppender.SeenEventExpectation(
+ "test2",
+ "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+ Level.TRACE,
+ "*failed to execute cluster state update in [2s]*"));
+ mockAppender.addExpectation(
+ new MockLogAppender.SeenEventExpectation(
+ "test3",
+ "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+ Level.DEBUG,
+ "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
+
+ Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
+ Loggers.addAppender(clusterLogger, mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(4);
clusterService.currentTimeOverride = System.nanoTime();
@@ -741,7 +755,7 @@ public class ClusterServiceTests extends ESTestCase {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
@@ -761,25 +775,41 @@ public class ClusterServiceTests extends ESTestCase {
});
latch.await();
} finally {
- rootLogger.removeAppender(mockAppender);
+ Loggers.removeAppender(clusterLogger, mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
- @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
+ @TestLogging("org.elasticsearch.cluster.service:WARN") // To ensure that we log cluster state events on WARN level
public void testLongClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
- mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low",
- "cluster.service", Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *"));
- mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN,
- "*cluster state update task [test2] took [32s] above the warn threshold of *"));
- mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN,
- "*cluster state update task [test3] took [33s] above the warn threshold of *"));
- mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN,
- "*cluster state update task [test4] took [34s] above the warn threshold of *"));
-
- Logger rootLogger = Logger.getRootLogger();
- rootLogger.addAppender(mockAppender);
+ mockAppender.addExpectation(
+ new MockLogAppender.UnseenEventExpectation(
+ "test1 shouldn't see because setting is too low",
+ "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+ Level.WARN,
+ "*cluster state update task [test1] took [*] above the warn threshold of *"));
+ mockAppender.addExpectation(
+ new MockLogAppender.SeenEventExpectation(
+ "test2",
+ "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+ Level.WARN,
+ "*cluster state update task [test2] took [32s] above the warn threshold of *"));
+ mockAppender.addExpectation(
+ new MockLogAppender.SeenEventExpectation(
+ "test3",
+ "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+ Level.WARN,
+ "*cluster state update task [test3] took [33s] above the warn threshold of *"));
+ mockAppender.addExpectation(
+ new MockLogAppender.SeenEventExpectation(
+ "test4",
+ "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
+ Level.WARN,
+ "*cluster state update task [test4] took [34s] above the warn threshold of *"));
+
+ Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
+ Loggers.addAppender(clusterLogger, mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(5);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
@@ -855,7 +885,7 @@ public class ClusterServiceTests extends ESTestCase {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
@@ -875,7 +905,7 @@ public class ClusterServiceTests extends ESTestCase {
});
latch.await();
} finally {
- rootLogger.removeAppender(mockAppender);
+ Loggers.removeAppender(clusterLogger, mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
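Both logging tests follow the same Log4j 2 pattern: appenders attach to a named logger through the Loggers helper rather than to the log4j 1.x root logger object, and the @TestLogging annotations correspondingly switch to fully qualified logger names. A condensed sketch of the lifecycle:

    Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
    Loggers.addAppender(clusterLogger, mockAppender);
    try {
        // ... submit cluster state update tasks and await the latch ...
    } finally {
        // Always detach, or the mock appender leaks into later tests.
        Loggers.removeAppender(clusterLogger, mockAppender);
    }
    mockAppender.assertAllExpectationsMatched();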
diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
index 7f423d1bb9..a1962ceefb 100644
--- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.settings;
+import org.apache.logging.log4j.Level;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -331,27 +332,30 @@ public class ClusterSettingsIT extends ESIntegTestCase {
public void testLoggerLevelUpdate() {
assertAcked(prepareCreate("test"));
- final String rootLevel = ESLoggerFactory.getRootLogger().getLevel();
- final String testLevel = ESLoggerFactory.getLogger("test").getLevel();
- try {
- client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("logger._root", "BOOM")).execute().actionGet();
- fail("Expected IllegalArgumentException");
- } catch (IllegalArgumentException e) {
- assertEquals("No enum constant org.elasticsearch.common.logging.ESLoggerFactory.LogLevel.BOOM", e.getMessage());
- }
+
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
+
+ final IllegalArgumentException e =
+ expectThrows(
+ IllegalArgumentException.class,
+ () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("logger._root", "BOOM")).execute().actionGet());
+ assertEquals("Unknown level constant [BOOM].", e.getMessage());
try {
- client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("logger.test", "TRACE").put("logger._root", "trace")).execute().actionGet();
- assertEquals("TRACE", ESLoggerFactory.getLogger("test").getLevel());
- assertEquals("TRACE", ESLoggerFactory.getRootLogger().getLevel());
+ final Settings.Builder testSettings = Settings.builder().put("logger.test", "TRACE").put("logger._root", "trace");
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(testSettings).execute().actionGet();
+ assertEquals(Level.TRACE, ESLoggerFactory.getLogger("test").getLevel());
+ assertEquals(Level.TRACE, ESLoggerFactory.getRootLogger().getLevel());
} finally {
if (randomBoolean()) {
- client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("logger.test").putNull("logger._root")).execute().actionGet();
+ final Settings.Builder defaultSettings = Settings.builder().putNull("logger.test").putNull("logger._root");
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(defaultSettings).execute().actionGet();
} else {
- client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("logger.*")).execute().actionGet();
+ final Settings.Builder defaultSettings = Settings.builder().putNull("logger.*");
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(defaultSettings).execute().actionGet();
}
- assertEquals(testLevel, ESLoggerFactory.getLogger("test").getLevel());
- assertEquals(rootLevel, ESLoggerFactory.getRootLogger().getLevel());
+ assertEquals(level, ESLoggerFactory.getLogger("test").getLevel());
+ assertEquals(level, ESLoggerFactory.getRootLogger().getLevel());
}
}
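Two test idioms change here: logger levels compare as org.apache.logging.log4j.Level values instead of strings, and the manual try/fail/catch dance gives way to expectThrows, which returns the exception for further assertions:

    // Before: try { ...; fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { ... }
    final IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> client().admin().cluster().prepareUpdateSettings()
                    .setTransientSettings(Settings.builder().put("logger._root", "BOOM"))
                    .execute().actionGet());
    assertEquals("Unknown level constant [BOOM].", e.getMessage());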
diff --git a/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java
index 5e8a99d82f..a88c24873f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java
@@ -20,7 +20,7 @@ package org.elasticsearch.cluster.shards;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
-import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
@@ -99,9 +99,9 @@ public class ClusterSearchShardsIT extends ESIntegTestCase {
client().admin().indices().prepareCreate("test2").setSettings(Settings.builder()
.put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
client().admin().indices().prepareAliases()
- .addAliasAction(AliasAction.newAddAliasAction("test1", "routing_alias").routing("ABC"))
- .addAliasAction(AliasAction.newAddAliasAction("test2", "routing_alias").routing("EFG"))
- .execute().actionGet();
+ .addAliasAction(AliasActions.add().index("test1").alias("routing_alias").routing("ABC"))
+ .addAliasAction(AliasActions.add().index("test2").alias("routing_alias").routing("EFG"))
+ .get();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("routing_alias").execute().actionGet();
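The alias API change above replaces the static AliasAction.newAddAliasAction factory with the fluent IndicesAliasesRequest.AliasActions builder; routing is configured the same way, and .get() is shorthand for .execute().actionGet():

    client().admin().indices().prepareAliases()
            .addAliasAction(AliasActions.add().index("test1").alias("routing_alias").routing("ABC"))
            .addAliasAction(AliasActions.add().index("test2").alias("routing_alias").routing("EFG"))
            .get();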
diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
index 8dbda2838a..0aad8669cb 100644
--- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.structure;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -39,7 +40,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllo
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.test.ESAllocationTestCase;
import java.util.Collections;
import java.util.HashMap;
@@ -250,14 +250,11 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
.add(newNode("node2", unmodifiableMap(node2Attributes)))
.localNodeId("node1")
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// after all are started, check routing iteration
ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes());
@@ -299,11 +296,9 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
.localNodeId("node1")
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
ShardsIterator shardsIterator = clusterState.routingTable().index("test").shard(0).onlyNodeSelectorActiveInitializingShardsIt("disk:ebs",clusterState.nodes());
assertThat(shardsIterator.size(), equalTo(1));
@@ -372,14 +367,11 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
.add(newNode("node2"))
.localNodeId("node1")
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY,
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
@@ -446,11 +438,9 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
.add(newNode("node3"))
.localNodeId("node1")
).build();
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.reroute(clusterState, "reroute");
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// When replicas haven't initialized, it comes back with the primary first, then initializing replicas
GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_replica_first");
@@ -471,11 +461,9 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
assertFalse(routing.primary());
assertTrue(routing.initializing());
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
- routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_replica");
diff --git a/core/src/test/java/org/elasticsearch/codecs/CodecTests.java b/core/src/test/java/org/elasticsearch/codecs/CodecTests.java
index 7bd47ac70f..1a2108fa73 100644
--- a/core/src/test/java/org/elasticsearch/codecs/CodecTests.java
+++ b/core/src/test/java/org/elasticsearch/codecs/CodecTests.java
@@ -47,18 +47,25 @@ public class CodecTests extends ESSingleNodeTestCase {
}
public void testAcceptPostingsFormat() throws IOException {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("field").field("type", "keyword").field("postings_format", Codec.getDefault().postingsFormat().getName()).endObject().endObject()
- .endObject().endObject().string();
int i = 0;
for (Version v : VersionUtils.allVersions()) {
if (v.onOrAfter(Version.V_2_0_0) == false) {
// no need to test, we don't support upgrading from these versions
continue;
}
- IndexService indexService = createIndex("test-" + i++, Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v).build());
+ IndexService indexService = createIndex("test-" + i++,
+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v).build());
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
try {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", v.onOrAfter(Version.V_5_0_0_alpha1) ? "keyword" : "string")
+ .field("postings_format", Codec.getDefault().postingsFormat().getName())
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
parser.parse("type", new CompressedXContent(mapping));
if (v.onOrAfter(Version.V_2_0_0_beta1)) {
fail("Elasticsearch 2.0 should not support custom postings formats");
@@ -74,17 +81,24 @@ public class CodecTests extends ESSingleNodeTestCase {
}
public void testAcceptDocValuesFormat() throws IOException {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("field").field("type", "keyword").field("doc_values_format", Codec.getDefault().docValuesFormat().getName()).endObject().endObject()
- .endObject().endObject().string();
int i = 0;
for (Version v : VersionUtils.allVersions()) {
if (v.onOrAfter(Version.V_2_0_0) == false) {
// no need to test, we don't support upgrading from these versions
continue;
}
- IndexService indexService = createIndex("test-" + i++, Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v).build());
+ IndexService indexService = createIndex("test-" + i++,
+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v).build());
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", v.onOrAfter(Version.V_5_0_0_alpha1) ? "keyword" : "string")
+ .field("doc_values_format", Codec.getDefault().docValuesFormat().getName())
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
try {
parser.parse("type", new CompressedXContent(mapping));
if (v.onOrAfter(Version.V_2_0_0_beta1)) {
diff --git a/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
index 76376a4d30..21112b9787 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
@@ -58,7 +58,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
private static final GeometryFactory GEOMETRY_FACTORY = SPATIAL_CONTEXT.getGeometryFactory();
- public void testParse_simplePoint() throws IOException {
+ public void testParseSimplePoint() throws IOException {
String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
.startArray("coordinates").value(100.0).value(0.0).endArray()
.endObject().string();
@@ -67,7 +67,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson);
}
- public void testParse_lineString() throws IOException {
+ public void testParseLineString() throws IOException {
String lineGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "LineString")
.startArray("coordinates")
.startArray().value(100.0).value(0.0).endArray()
@@ -84,7 +84,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), lineGeoJson);
}
- public void testParse_multiLineString() throws IOException {
+ public void testParseMultiLineString() throws IOException {
String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiLineString")
.startArray("coordinates")
.startArray()
@@ -111,7 +111,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), multilinesGeoJson);
}
- public void testParse_circle() throws IOException {
+ public void testParseCircle() throws IOException {
String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "circle")
.startArray("coordinates").value(100.0).value(0.0).endArray()
.field("radius", "100m")
@@ -121,7 +121,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(expected, multilinesGeoJson);
}
- public void testParse_multiDimensionShapes() throws IOException {
+ public void testParseMultiDimensionShapes() throws IOException {
// multi dimension point
String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
.startArray("coordinates").value(100.0).value(0.0).value(15.0).value(18.0).endArray()
@@ -147,7 +147,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expectedLS), lineGeoJson);
}
- public void testParse_envelope() throws IOException {
+ public void testParseEnvelope() throws IOException {
// test #1: envelope with expected coordinate order (TopLeft, BottomRight)
String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope")
.startArray("coordinates")
@@ -192,7 +192,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}
- public void testParse_polygonNoHoles() throws IOException {
+ public void testParsePolygonNoHoles() throws IOException {
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")
.startArray()
@@ -217,7 +217,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), polygonGeoJson);
}
- public void testParse_invalidPoint() throws IOException {
+ public void testParseInvalidPoint() throws IOException {
// test case 1: create an invalid point object with multipoint data format
String invalidPoint1 = XContentFactory.jsonBuilder().startObject().field("type", "point")
.startArray("coordinates")
@@ -238,7 +238,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}
- public void testParse_invalidMultipoint() throws IOException {
+ public void testParseInvalidMultipoint() throws IOException {
// test case 1: create an invalid multipoint object with single coordinate
String invalidMultipoint1 = XContentFactory.jsonBuilder().startObject().field("type", "multipoint")
.startArray("coordinates").value(-74.011).value(40.753).endArray()
@@ -267,7 +267,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}
- public void testParse_invalidMultiPolygon() throws IOException {
+ public void testParseInvalidMultiPolygon() throws IOException {
// test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring)
String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
.startArray("coordinates")
@@ -302,7 +302,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class);
}
- public void testParse_OGCPolygonWithoutHoles() throws IOException {
+ public void testParseOGCPolygonWithoutHoles() throws IOException {
// test 1: ccw poly not crossing dateline
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")
@@ -384,7 +384,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertMultiPolygon(shape);
}
- public void testParse_OGCPolygonWithHoles() throws IOException {
+ public void testParseOGCPolygonWithHoles() throws IOException {
// test 1: ccw poly not crossing dateline
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")
@@ -490,7 +490,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertMultiPolygon(shape);
}
- public void testParse_invalidPolygon() throws IOException {
+ public void testParseInvalidPolygon() throws IOException {
/**
* The following 3 test cases ensure proper error handling of invalid polygons
* per the GeoJSON specification
@@ -579,7 +579,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}
- public void testParse_polygonWithHole() throws IOException {
+ public void testParsePolygonWithHole() throws IOException {
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")
.startArray()
@@ -623,7 +623,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), polygonGeoJson);
}
- public void testParse_selfCrossingPolygon() throws IOException {
+ public void testParseSelfCrossingPolygon() throws IOException {
// test self crossing ccw poly not crossing dateline
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")
@@ -644,7 +644,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class);
}
- public void testParse_multiPoint() throws IOException {
+ public void testParseMultiPoint() throws IOException {
String multiPointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPoint")
.startArray("coordinates")
.startArray().value(100.0).value(0.0).endArray()
@@ -658,7 +658,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(expected, multiPointGeoJson);
}
- public void testParse_multiPolygon() throws IOException {
+ public void testParseMultiPolygon() throws IOException {
// test #1: two polygons; one without hole, one with hole
String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
.startArray("coordinates")
@@ -770,7 +770,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(withHoles), multiPolygonGeoJson);
}
- public void testParse_geometryCollection() throws IOException {
+ public void testParseGeometryCollection() throws IOException {
String geometryCollectionGeoJson = XContentFactory.jsonBuilder().startObject()
.field("type", "GeometryCollection")
.startArray("geometries")
@@ -822,7 +822,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson);
}
- public void testParse_orientationOption() throws IOException {
+ public void testParseOrientationOption() throws IOException {
// test 1: valid ccw (right handed system) poly not crossing dateline (with 'right' field)
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.field("orientation", "right")
diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
index 9e5beabd9b..f1e6c1ba67 100644
--- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
+++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
@@ -30,19 +30,22 @@ import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.TreeMap;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.startsWith;
/**
* Tests for {@link BytesStreamOutput} paging behaviour.
@@ -462,11 +465,11 @@ public class BytesStreamsTests extends ESTestCase {
}
final BytesStreamOutput out = new BytesStreamOutput();
- out.writeMapOfLists(expected);
+ out.writeMapOfLists(expected, StreamOutput::writeString, StreamOutput::writeString);
final StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes()));
- final Map<String, List<String>> loaded = in.readMapOfLists();
+ final Map<String, List<String>> loaded = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
assertThat(loaded.size(), equalTo(expected.size()));
@@ -622,4 +625,50 @@ public class BytesStreamsTests extends ESTestCase {
out.writeBoolean(value);
}
}
+
+ public void testWriteMapWithConsistentOrder() throws IOException {
+ Map<String, String> map =
+ randomMap(new TreeMap<>(), randomIntBetween(2, 20),
+ () -> randomAsciiOfLength(5),
+ () -> randomAsciiOfLength(5));
+
+ Map<String, Object> reverseMap = new TreeMap<>(Collections.reverseOrder());
+ reverseMap.putAll(map);
+
+ List<String> mapKeys = map.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
+ List<String> reverseMapKeys = reverseMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
+
+ assertNotEquals(mapKeys, reverseMapKeys);
+
+ BytesStreamOutput output = new BytesStreamOutput();
+ BytesStreamOutput reverseMapOutput = new BytesStreamOutput();
+ output.writeMapWithConsistentOrder(map);
+ reverseMapOutput.writeMapWithConsistentOrder(reverseMap);
+
+ assertEquals(output.bytes(), reverseMapOutput.bytes());
+ }
+
+ public void testReadMapByUsingWriteMapWithConsistentOrder() throws IOException {
+ Map<String, String> streamOutMap =
+ randomMap(new HashMap<>(), randomIntBetween(2, 20),
+ () -> randomAsciiOfLength(5),
+ () -> randomAsciiOfLength(5));
+ BytesStreamOutput streamOut = new BytesStreamOutput();
+ streamOut.writeMapWithConsistentOrder(streamOutMap);
+ StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes()));
+ Map<String, Object> streamInMap = in.readMap();
+ assertEquals(streamOutMap, streamInMap);
+ }
+
+ public void testWriteMapWithConsistentOrderWithLinkedHashMapShouldThrowAssertError() throws IOException {
+ BytesStreamOutput output = new BytesStreamOutput();
+ Map<String, Object> map = new LinkedHashMap<>();
+ Throwable e = expectThrows(AssertionError.class, () -> output.writeMapWithConsistentOrder(map));
+ assertEquals(AssertionError.class, e.getClass());
+ }
+
+ private static <K, V> Map<K, V> randomMap(Map<K, V> map, int size, Supplier<K> keyGenerator, Supplier<V> valueGenerator) {
+ IntStream.range(0, size).forEach(i -> map.put(keyGenerator.get(), valueGenerator.get()));
+ return map;
+ }
}
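The new testWriteMapWithConsistentOrder test above pins down a property rather than an implementation: two maps with the same entries must serialize to identical bytes regardless of iteration order. A minimal sketch of one way to get that property, using plain JDK streams instead of Elasticsearch's StreamOutput (the real writeMapWithConsistentOrder may differ):

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.TreeMap;

    final class OrderIndependentMapWriter {
        // Sort keys into a canonical order before writing, so HashMap,
        // TreeMap, and reverse-ordered inputs all produce the same bytes.
        static void writeMapWithConsistentOrder(DataOutputStream out, Map<String, String> map) throws IOException {
            // Mirrors the AssertionError the LinkedHashMap test expects: an
            // order-preserving map signals the caller cares about order,
            // which canonical sorting would silently discard.
            assert (map instanceof LinkedHashMap) == false : "use an order-preserving write instead";
            Map<String, String> sorted = new TreeMap<>(map);
            out.writeInt(sorted.size());
            for (Map.Entry<String, String> entry : sorted.entrySet()) {
                out.writeUTF(entry.getKey());
                out.writeUTF(entry.getValue());
            }
        }
    }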
diff --git a/core/src/test/java/org/elasticsearch/common/logging/ESLoggerTests.java b/core/src/test/java/org/elasticsearch/common/logging/ESLoggerTests.java
deleted file mode 100644
index 67a6c0555c..0000000000
--- a/core/src/test/java/org/elasticsearch/common/logging/ESLoggerTests.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.logging;
-
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LocationInfo;
-import org.apache.log4j.spi.LoggingEvent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.test.ESTestCase;
-import org.junit.After;
-
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.notNullValue;
-
-public class ESLoggerTests extends ESTestCase {
-
- private ESLogger esTestLogger;
- private TestAppender testAppender;
- private String testLevel;
- private DeprecationLogger deprecationLogger;
- private TestAppender deprecationAppender;
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- this.testLevel = ESLoggerFactory.getLogger("test").getLevel();
- LogConfigurator.reset();
- Path configDir = getDataPath("config");
- // Need to set custom path.conf so we can use a custom logging.yml file for the test
- Settings settings = Settings.builder()
- .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
- .build();
- LogConfigurator.configure(settings, true);
-
- esTestLogger = ESLoggerFactory.getLogger("test");
- Logger testLogger = esTestLogger.getLogger();
- assertThat(testLogger.getLevel(), equalTo(Level.TRACE));
- testAppender = new TestAppender();
- testLogger.addAppender(testAppender);
-
- // deprecation setup, needs to be set to debug to log
- deprecationLogger = ESLoggerFactory.getDeprecationLogger("test");
- deprecationAppender = new TestAppender();
- ESLogger logger = ESLoggerFactory.getLogger("deprecation.test");
- logger.setLevel("DEBUG");
- logger.getLogger().addAppender(deprecationAppender);
- }
-
- @Override
- @After
- public void tearDown() throws Exception {
- super.tearDown();
- esTestLogger.setLevel(testLevel);
- Logger testLogger = esTestLogger.getLogger();
- testLogger.removeAppender(testAppender);
- Logger deprecationLogger = ESLoggerFactory.getLogger("deprecation.test").getLogger();
- deprecationLogger.removeAppender(deprecationAppender);
- }
-
- public void testLocationInfoTest() {
- esTestLogger.error("This is an error");
- esTestLogger.warn("This is a warning");
- esTestLogger.info("This is an info");
- esTestLogger.debug("This is a debug");
- esTestLogger.trace("This is a trace");
- List<LoggingEvent> events = testAppender.getEvents();
- assertThat(events, notNullValue());
- assertThat(events.size(), equalTo(5));
- LoggingEvent event = events.get(0);
- assertThat(event, notNullValue());
- assertThat(event.getLevel(), equalTo(Level.ERROR));
- assertThat(event.getRenderedMessage(), equalTo("This is an error"));
- LocationInfo locationInfo = event.getLocationInformation();
- assertThat(locationInfo, notNullValue());
- assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
- assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
- event = events.get(1);
- assertThat(event, notNullValue());
- assertThat(event.getLevel(), equalTo(Level.WARN));
- assertThat(event.getRenderedMessage(), equalTo("This is a warning"));
- locationInfo = event.getLocationInformation();
- assertThat(locationInfo, notNullValue());
- assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
- assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
- event = events.get(2);
- assertThat(event, notNullValue());
- assertThat(event.getLevel(), equalTo(Level.INFO));
- assertThat(event.getRenderedMessage(), equalTo("This is an info"));
- locationInfo = event.getLocationInformation();
- assertThat(locationInfo, notNullValue());
- assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
- assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
- event = events.get(3);
- assertThat(event, notNullValue());
- assertThat(event.getLevel(), equalTo(Level.DEBUG));
- assertThat(event.getRenderedMessage(), equalTo("This is a debug"));
- locationInfo = event.getLocationInformation();
- assertThat(locationInfo, notNullValue());
- assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
- assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
- event = events.get(4);
- assertThat(event, notNullValue());
- assertThat(event.getLevel(), equalTo(Level.TRACE));
- assertThat(event.getRenderedMessage(), equalTo("This is a trace"));
- locationInfo = event.getLocationInformation();
- assertThat(locationInfo, notNullValue());
- assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
- assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
- }
-
- public void testDeprecationLogger() {
- deprecationLogger.deprecated("This is a deprecation message");
- List<LoggingEvent> deprecationEvents = deprecationAppender.getEvents();
- LoggingEvent event = deprecationEvents.get(0);
- assertThat(event, notNullValue());
- assertThat(event.getLevel(), equalTo(Level.DEBUG));
- assertThat(event.getRenderedMessage(), equalTo("This is a deprecation message"));
- }
-
- private static class TestAppender extends AppenderSkeleton {
-
- private List<LoggingEvent> events = new ArrayList<>();
-
- @Override
- public void close() {
- }
-
- @Override
- public boolean requiresLayout() {
- return false;
- }
-
- @Override
- protected void append(LoggingEvent event) {
- // Forces it to generate the location information
- event.getLocationInformation();
- events.add(event);
- }
-
- public List<LoggingEvent> getEvents() {
- return events;
- }
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java b/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java
deleted file mode 100644
index 581a959936..0000000000
--- a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.logging;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardOpenOption;
-import java.util.Arrays;
-
-import org.apache.log4j.Appender;
-import org.apache.log4j.Logger;
-import org.apache.log4j.MDC;
-import org.elasticsearch.cli.MockTerminal;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.node.internal.InternalSettingsPreparer;
-import org.elasticsearch.test.ESTestCase;
-import org.junit.Before;
-
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;
-
-/**
- *
- */
-public class LoggingConfigurationTests extends ESTestCase {
-
- @Before
- public void before() throws Exception {
- LogConfigurator.reset();
- }
-
- public void testResolveMultipleConfigs() throws Exception {
- String level = ESLoggerFactory.getLogger("test").getLevel();
- try {
- Path configDir = getDataPath("config");
- Settings settings = Settings.builder()
- .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
- .build();
- LogConfigurator.configure(settings, true);
-
- ESLogger esLogger = ESLoggerFactory.getLogger("test");
- Logger logger = esLogger.getLogger();
- Appender appender = logger.getAppender("console");
- assertThat(appender, notNullValue());
-
- esLogger = ESLoggerFactory.getLogger("second");
- logger = esLogger.getLogger();
- appender = logger.getAppender("console2");
- assertThat(appender, notNullValue());
-
- esLogger = ESLoggerFactory.getLogger("third");
- logger = esLogger.getLogger();
- appender = logger.getAppender("console3");
- assertThat(appender, notNullValue());
- } finally {
- ESLoggerFactory.getLogger("test").setLevel(level);
- }
- }
-
- public void testResolveJsonLoggingConfig() throws Exception {
- Path tmpDir = createTempDir();
- Path loggingConf = tmpDir.resolve(loggingConfiguration("json"));
- Files.write(loggingConf, "{\"json\": \"foo\"}".getBytes(StandardCharsets.UTF_8));
- Environment environment = new Environment(
- Settings.builder()
- .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
- .build());
-
- Settings.Builder builder = Settings.builder();
- LogConfigurator.resolveConfig(environment, builder);
-
- Settings logSettings = builder.build();
- assertThat(logSettings.get("json"), is("foo"));
- }
-
- public void testResolveYamlLoggingConfig() throws Exception {
- Path tmpDir = createTempDir();
- Path loggingConf1 = tmpDir.resolve(loggingConfiguration("yml"));
- Path loggingConf2 = tmpDir.resolve(loggingConfiguration("yaml"));
- Files.write(loggingConf1, "yml: bar".getBytes(StandardCharsets.UTF_8));
- Files.write(loggingConf2, "yaml: bar".getBytes(StandardCharsets.UTF_8));
- Environment environment = new Environment(
- Settings.builder()
- .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
- .build());
-
- Settings.Builder builder = Settings.builder();
- LogConfigurator.resolveConfig(environment, builder);
-
- Settings logSettings = builder.build();
- assertThat(logSettings.get("yml"), is("bar"));
- assertThat(logSettings.get("yaml"), is("bar"));
- }
-
- public void testResolveConfigInvalidFilename() throws Exception {
- Path tmpDir = createTempDir();
- Path invalidSuffix = tmpDir.resolve(loggingConfiguration(randomFrom(LogConfigurator.ALLOWED_SUFFIXES)) + randomInvalidSuffix());
- Files.write(invalidSuffix, "yml: bar".getBytes(StandardCharsets.UTF_8));
- Environment environment = new Environment(
- Settings.builder()
- .put(Environment.PATH_CONF_SETTING.getKey(), invalidSuffix.toAbsolutePath())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
- .build());
-
- Settings.Builder builder = Settings.builder();
- LogConfigurator.resolveConfig(environment, builder);
-
- Settings logSettings = builder.build();
- assertThat(logSettings.get("yml"), nullValue());
- }
-
- // tests that custom settings are not overwritten by settings in the config file
- public void testResolveOrder() throws Exception {
- Path tmpDir = createTempDir();
- Path loggingConf = tmpDir.resolve(loggingConfiguration("yaml"));
- Files.write(loggingConf, "logger.test_resolve_order: INFO, file\n".getBytes(StandardCharsets.UTF_8));
- Files.write(loggingConf, "appender.file.type: file\n".getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND);
- Environment environment = InternalSettingsPreparer.prepareEnvironment(
- Settings.builder()
- .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
- .put("logger.test_resolve_order", "TRACE, console")
- .put("appender.console.type", "console")
- .put("appender.console.layout.type", "consolePattern")
- .put("appender.console.layout.conversionPattern", "[%d{ISO8601}][%-5p][%-25c] %m%n")
- .build(), new MockTerminal());
- LogConfigurator.configure(environment.settings(), true);
- // args should overwrite whatever is in the config
- ESLogger esLogger = ESLoggerFactory.getLogger("test_resolve_order");
- Logger logger = esLogger.getLogger();
- Appender appender = logger.getAppender("console");
- assertThat(appender, notNullValue());
- assertTrue(logger.isTraceEnabled());
- appender = logger.getAppender("file");
- assertThat(appender, nullValue());
- }
-
- // tests that config file is not read when we call LogConfigurator.configure(Settings, false)
- public void testConfigNotRead() throws Exception {
- Path tmpDir = createTempDir();
- Path loggingConf = tmpDir.resolve(loggingConfiguration("yaml"));
- Files.write(loggingConf,
- Arrays.asList(
- "logger.test_config_not_read: INFO, console",
- "appender.console.type: console"),
- StandardCharsets.UTF_8);
- Environment environment = InternalSettingsPreparer.prepareEnvironment(
- Settings.builder()
- .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
- .build(), new MockTerminal());
- LogConfigurator.configure(environment.settings(), false);
- ESLogger esLogger = ESLoggerFactory.getLogger("test_config_not_read");
-
- assertNotNull(esLogger);
- Logger logger = esLogger.getLogger();
- Appender appender = logger.getAppender("console");
- // config was not read
- assertNull(appender);
- }
-
- private static String loggingConfiguration(String suffix) {
- return "logging." + randomAsciiOfLength(randomIntBetween(0, 10)) + "." + suffix;
- }
-
- private static String randomInvalidSuffix() {
- String randomSuffix;
- do {
- randomSuffix = randomAsciiOfLength(randomIntBetween(1, 5));
- } while (LogConfigurator.ALLOWED_SUFFIXES.contains(randomSuffix));
- return randomSuffix;
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java
index 734068347b..2daea85967 100644
--- a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java
@@ -23,30 +23,34 @@ import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Table;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.ModuleTestCase;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.cat.AbstractCatAction;
import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
-import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.TransportInterceptor;
+import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
public class NetworkModuleTests extends ModuleTestCase {
- static class FakeTransportService extends TransportService {
- public FakeTransportService() {
- super(null, null, null);
- }
- }
-
static class FakeTransport extends AssertingLocalTransport {
public FakeTransport() {
super(null, null, null, null);
@@ -79,6 +83,7 @@ public class NetworkModuleTests extends ModuleTestCase {
public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {}
}
+
static class FakeRestHandler extends BaseRestHandler {
public FakeRestHandler() {
super(null);
@@ -101,64 +106,159 @@ public class NetworkModuleTests extends ModuleTestCase {
}
}
- public void testRegisterTransportService() {
- Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "custom")
- .put(NetworkModule.HTTP_ENABLED.getKey(), false)
- .put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
- .build();
- NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
- module.registerTransportService("custom", FakeTransportService.class);
- assertBinding(module, TransportService.class, FakeTransportService.class);
- assertFalse(module.isTransportClient());
-
- // check it works with transport only as well
- module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true);
- module.registerTransportService("custom", FakeTransportService.class);
- assertBinding(module, TransportService.class, FakeTransportService.class);
- assertTrue(module.isTransportClient());
- }
-
public void testRegisterTransport() {
Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom")
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.build();
- NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
- module.registerTransport("custom", FakeTransport.class);
- assertBinding(module, Transport.class, FakeTransport.class);
+ Supplier<Transport> custom = FakeTransport::new;
+ NetworkPlugin plugin = new NetworkPlugin() {
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap("custom", custom);
+ }
+ };
+ NetworkModule module = newNetworkModule(settings, false, plugin);
assertFalse(module.isTransportClient());
+ assertFalse(module.isHttpEnabled());
+ assertSame(custom, module.getTransportSupplier());
// check it works with transport only as well
- module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true);
- module.registerTransport("custom", FakeTransport.class);
- assertBinding(module, Transport.class, FakeTransport.class);
+ module = newNetworkModule(settings, true, plugin);
+ assertSame(custom, module.getTransportSupplier());
assertTrue(module.isTransportClient());
+ assertFalse(module.isHttpEnabled());
}
public void testRegisterHttpTransport() {
Settings settings = Settings.builder()
.put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom")
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
- NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
- module.registerHttpTransport("custom", FakeHttpTransport.class);
- assertBinding(module, HttpServerTransport.class, FakeHttpTransport.class);
- assertFalse(module.isTransportClient());
+ Supplier<HttpServerTransport> custom = FakeHttpTransport::new;
- // check registration not allowed for transport only
- module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true);
- assertTrue(module.isTransportClient());
- try {
- module.registerHttpTransport("custom", FakeHttpTransport.class);
- fail();
- } catch (IllegalArgumentException e) {
- assertTrue(e.getMessage().contains("Cannot register http transport"));
- assertTrue(e.getMessage().contains("for transport client"));
- }
+ NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+ @Override
+ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool,
+ BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap("custom", custom);
+ }
+ });
+ assertSame(custom, module.getHttpServerTransportSupplier());
+ assertFalse(module.isTransportClient());
+ assertTrue(module.isHttpEnabled());
- // not added if http is disabled
settings = Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
- module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
- assertNotBound(module, HttpServerTransport.class);
- assertFalse(module.isTransportClient());
+ NetworkModule newModule = newNetworkModule(settings, false);
+ assertFalse(newModule.isTransportClient());
+ assertFalse(newModule.isHttpEnabled());
+ expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier());
+ }
+
+ public void testOverrideDefault() {
+ Settings settings = Settings.builder()
+ .put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom")
+ .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), "default_custom")
+ .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), "local")
+ .put(NetworkModule.TRANSPORT_TYPE_KEY, "default_custom").build();
+ Supplier<Transport> customTransport = FakeTransport::new;
+ Supplier<HttpServerTransport> custom = FakeHttpTransport::new;
+ Supplier<HttpServerTransport> def = FakeHttpTransport::new;
+ NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap("default_custom", customTransport);
+ }
+
+ @Override
+ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool,
+ BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>();
+ supplierMap.put("custom", custom);
+ supplierMap.put("default_custom", def);
+ return supplierMap;
+ }
+ });
+ assertSame(custom, module.getHttpServerTransportSupplier());
+ assertSame(customTransport, module.getTransportSupplier());
+ }
+
+ public void testDefaultKeys() {
+ Settings settings = Settings.builder()
+ .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), "default_custom")
+ .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), "default_custom").build();
+ Supplier<HttpServerTransport> custom = FakeHttpTransport::new;
+ Supplier<HttpServerTransport> def = FakeHttpTransport::new;
+ Supplier<Transport> customTransport = FakeTransport::new;
+ NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap("default_custom", customTransport);
+ }
+
+ @Override
+ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool,
+ BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>();
+ supplierMap.put("custom", custom);
+ supplierMap.put("default_custom", def);
+ return supplierMap;
+ }
+ });
+
+ assertSame(def, module.getHttpServerTransportSupplier());
+ assertSame(customTransport, module.getTransportSupplier());
+ }
+
+ public void testRegisterInterceptor() {
+ Settings settings = Settings.builder()
+ .put(NetworkModule.HTTP_ENABLED.getKey(), false)
+ .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
+
+ TransportInterceptor interceptor = new TransportInterceptor() {
+ };
+ NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+ @Override
+ public List<TransportInterceptor> getTransportInterceptors() {
+ return Collections.singletonList(interceptor);
+ }
+ });
+
+ TransportInterceptor transportInterceptor = module.getTransportInterceptor();
+ assertTrue(transportInterceptor instanceof NetworkModule.CompositeTransportInterceptor);
+ assertEquals(((NetworkModule.CompositeTransportInterceptor)transportInterceptor).transportInterceptors.size(), 1);
+ assertSame(((NetworkModule.CompositeTransportInterceptor)transportInterceptor).transportInterceptors.get(0), interceptor);
+
+ NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> {
+ newNetworkModule(settings, false, new NetworkPlugin() {
+ @Override
+ public List<TransportInterceptor> getTransportInterceptors() {
+ return Collections.singletonList(null);
+ }
+ });
+ });
+ assertEquals("interceptor must not be null", nullPointerException.getMessage());
+
+ }
+
+ private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... plugins) {
+ return new NetworkModule(settings, transportClient, Arrays.asList(plugins), null, null, null, null, null);
}
}
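The rewritten NetworkModuleTests capture the move away from Guice bindings (registerTransport plus assertBinding) to plugins handing NetworkModule maps of named factories. A self-contained sketch of that supplier-registry shape, with all names invented for illustration; the real NetworkModule additionally handles defaults, HTTP transports, and interceptors:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    final class TransportRegistry<T> {
        private final Map<String, Supplier<T>> factories = new HashMap<>();

        // Plugins contribute named factories; nulls are rejected eagerly,
        // like the "interceptor must not be null" check tested above.
        void register(String name, Supplier<T> factory) {
            if (factory == null) {
                throw new NullPointerException("factory must not be null");
            }
            if (factories.putIfAbsent(name, factory) != null) {
                throw new IllegalArgumentException("transport [" + name + "] already registered");
            }
        }

        // A settings value picks which factory is used; asking for an
        // unregistered name fails loudly, as getHttpServerTransportSupplier
        // does when HTTP is disabled.
        Supplier<T> getSupplier(String configuredName) {
            Supplier<T> supplier = factories.get(configuredName);
            if (supplier == null) {
                throw new IllegalStateException("no transport registered for [" + configuredName + "]");
            }
            return supplier;
        }
    }

Because registration returns suppliers rather than binding classes into an injector, tests can assertSame on the exact factory instance, which is what the new assertions do.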
diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java
index e5b95f258a..85a7472416 100644
--- a/core/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java
+++ b/core/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java
@@ -22,6 +22,10 @@ package org.elasticsearch.common.network;
import org.elasticsearch.test.ESTestCase;
import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.util.Collections;
+
+import static org.hamcrest.Matchers.containsString;
/**
* Tests for network utils. Please avoid using any methods that cause DNS lookups!
@@ -74,4 +78,30 @@ public class NetworkUtilsTests extends ESTestCase {
assertArrayEquals(new InetAddress[] { InetAddress.getByName("127.0.0.1") }, NetworkUtils.filterIPV4(addresses));
assertArrayEquals(new InetAddress[] { InetAddress.getByName("::1") }, NetworkUtils.filterIPV6(addresses));
}
+
+ /**
+ * Test that selecting by name is possible and properly matches the addresses on all interfaces and virtual
+ * interfaces.
+ *
+ * Note: interfaces that are down, or that have no addresses assigned, are
+ * skipped so the test does not fail spuriously on such machines.
+ */
+ public void testAddressInterfaceLookup() throws Exception {
+ for (NetworkInterface netIf : NetworkUtils.getInterfaces()) {
+ if (!netIf.isUp() || Collections.list(netIf.getInetAddresses()).isEmpty()) {
+ continue;
+ }
+
+ String name = netIf.getName();
+ InetAddress[] expectedAddresses = Collections.list(netIf.getInetAddresses()).toArray(new InetAddress[0]);
+ InetAddress[] foundAddresses = NetworkUtils.getAddressesForInterface(name);
+ assertArrayEquals(expectedAddresses, foundAddresses);
+ }
+ }
+
+ public void testNonExistingInterface() throws Exception {
+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
+ () -> NetworkUtils.getAddressesForInterface("non-existing"));
+ assertThat(exception.getMessage(), containsString("No interface named 'non-existing' found"));
+ }
}
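The two new NetworkUtils tests describe a by-name interface lookup with a loud failure for unknown names. A sketch of the same behaviour in plain java.net, assuming nothing about the real NetworkUtils internals:

    import java.net.InetAddress;
    import java.net.NetworkInterface;
    import java.net.SocketException;
    import java.util.Collections;

    final class InterfaceLookup {
        // Resolve an interface by name; getByName returns null for unknown
        // names, which is surfaced as the IllegalArgumentException the
        // testNonExistingInterface test expects.
        static InetAddress[] addressesForInterface(String name) throws SocketException {
            NetworkInterface netIf = NetworkInterface.getByName(name);
            if (netIf == null) {
                throw new IllegalArgumentException("No interface named '" + name + "' found");
            }
            return Collections.list(netIf.getInetAddresses()).toArray(new InetAddress[0]);
        }
    }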
diff --git a/core/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java
new file mode 100644
index 0000000000..24ce166d15
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.common.settings.Setting.Property;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.indices.IndexingMemoryController;
+import org.elasticsearch.indices.IndicesQueryCache;
+import org.elasticsearch.indices.IndicesRequestCache;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+
+public class MemorySizeSettingsTests extends ESTestCase {
+
+ public void testPageCacheLimitHeapSetting() {
+ assertMemorySizeSetting(PageCacheRecycler.LIMIT_HEAP_SETTING, "cache.recycler.page.limit.heap",
+ new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.1)));
+ }
+
+ public void testIndexBufferSizeSetting() {
+ assertMemorySizeSetting(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "indices.memory.index_buffer_size",
+ new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.1)));
+ }
+
+ public void testQueryCacheSizeSetting() {
+ assertMemorySizeSetting(IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, "indices.queries.cache.size",
+ new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.1)));
+ }
+
+ public void testIndicesRequestCacheSetting() {
+ assertMemorySizeSetting(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, "indices.requests.cache.size",
+ new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.01)));
+ }
+
+ public void testCircuitBreakerSettings() {
+ assertMemorySizeSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.total.limit",
+ new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.7)));
+ assertMemorySizeSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.fielddata.limit",
+ new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6)));
+ assertMemorySizeSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.request.limit",
+ new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6)));
+ assertMemorySizeSetting(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING,
+ "network.breaker.inflight_requests.limit", new ByteSizeValue((JvmInfo.jvmInfo().getMem().getHeapMax().getBytes())));
+ }
+
+ public void testIndicesFieldDataCacheSetting() {
+ assertMemorySizeSetting(IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, "indices.fielddata.cache.size",
+ new ByteSizeValue(-1));
+ }
+
+ private void assertMemorySizeSetting(Setting<ByteSizeValue> setting, String settingKey, ByteSizeValue defaultValue) {
+ assertThat(setting, notNullValue());
+ assertThat(setting.getKey(), equalTo(settingKey));
+ assertThat(setting.getProperties(), hasItem(Property.NodeScope));
+ assertThat(setting.getDefault(Settings.EMPTY),
+ equalTo(defaultValue));
+ Settings settingWithPercentage = Settings.builder().put(settingKey, "25%").build();
+ assertThat(setting.get(settingWithPercentage),
+ equalTo(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.25))));
+ Settings settingWithBytesValue = Settings.builder().put(settingKey, "1024b").build();
+ assertThat(setting.get(settingWithBytesValue), equalTo(new ByteSizeValue(1024)));
+ }
+
+}
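MemorySizeSettingsTests asserts the same arithmetic for every setting: a percentage value resolves against the JVM's max heap, while an absolute value like "1024b" parses directly. A minimal sketch of that resolution rule (MemorySizeParser and its method are illustrative names, not Elasticsearch API):

    final class MemorySizeParser {
        // Resolve a memory-size string either as a fraction of the given
        // heap ceiling ("25%") or as an absolute byte count ("1024b").
        static long parseBytes(String value, long maxHeapBytes) {
            if (value.endsWith("%")) {
                double fraction = Double.parseDouble(value.substring(0, value.length() - 1)) / 100.0;
                return (long) (maxHeapBytes * fraction);
            }
            if (value.endsWith("b")) {
                return Long.parseLong(value.substring(0, value.length() - 1));
            }
            // Matches the spirit of the setting's error for unit-less input.
            throw new IllegalArgumentException("failed to parse [" + value + "]: unit is missing or unrecognized");
        }
    }

So parseBytes("25%", heapMax) yields heapMax * 0.25, exactly the value the assertMemorySizeSetting helper above checks for each setting key.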
diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
index dee20d6b32..635c00c186 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
@@ -18,11 +18,13 @@
*/
package org.elasticsearch.common.settings;
+import org.apache.logging.log4j.Level;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.test.ESTestCase;
@@ -39,6 +41,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
+import static org.hamcrest.CoreMatchers.equalTo;
+
public class ScopedSettingsTests extends ESTestCase {
public void testAddConsumer() {
@@ -154,16 +158,22 @@ public class ScopedSettingsTests extends ESTestCase {
}
public void testDiff() throws IOException {
- Setting<Integer> foobarbaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope);
- Setting<Integer> foobar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope);
- ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(foobar, foobarbaz)));
+ Setting<Integer> fooBarBaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope);
+ Setting<Integer> fooBar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope);
+ Setting<List<String>> foorBarQuux =
+ Setting.listSetting("foo.bar.quux", Arrays.asList("a", "b", "c"), Function.identity(), Property.NodeScope);
+ ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(fooBar, fooBarBaz, foorBarQuux)));
Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY);
- assertEquals(diff.getAsMap().size(), 1);
- assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(1));
-
- diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.builder().put("foo.bar.baz", 17).build());
- assertEquals(diff.getAsMap().size(), 1);
- assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(17));
+ assertThat(diff.getAsMap().size(), equalTo(2));
+ assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(1));
+ assertThat(diff.get("foo.bar.quux", null), equalTo("[\"a\",\"b\",\"c\"]"));
+
+ diff = settings.diff(
+ Settings.builder().put("foo.bar", 5).build(),
+ Settings.builder().put("foo.bar.baz", 17).put("foo.bar.quux", "d,e,f").build());
+ assertThat(diff.getAsMap().size(), equalTo(2));
+ assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(17));
+ assertThat(diff.get("foo.bar.quux", null), equalTo("[\"d\",\"e\",\"f\"]"));
}
public void testUpdateTracer() {
@@ -302,45 +312,44 @@ public class ScopedSettingsTests extends ESTestCase {
}
public void testLoggingUpdates() {
- final String level = ESLoggerFactory.getRootLogger().getLevel();
- final String testLevel = ESLoggerFactory.getLogger("test").getLevel();
- String property = randomFrom(ESLoggerFactory.LogLevel.values()).toString();
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
+ final Level testLevel = ESLoggerFactory.getLogger("test").getLevel();
+ Level property = randomFrom(Level.values());
Settings.Builder builder = Settings.builder().put("logger.level", property);
try {
ClusterSettings settings = new ClusterSettings(builder.build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
- try {
- settings.validate(Settings.builder().put("logger._root", "boom").build());
- fail();
- } catch (IllegalArgumentException ex) {
- assertEquals("No enum constant org.elasticsearch.common.logging.ESLoggerFactory.LogLevel.BOOM", ex.getMessage());
- }
+ IllegalArgumentException ex =
+ expectThrows(
+ IllegalArgumentException.class,
+ () -> settings.validate(Settings.builder().put("logger._root", "boom").build()));
+ assertEquals("Unknown level constant [BOOM].", ex.getMessage());
assertEquals(level, ESLoggerFactory.getRootLogger().getLevel());
settings.applySettings(Settings.builder().put("logger._root", "TRACE").build());
- assertEquals("TRACE", ESLoggerFactory.getRootLogger().getLevel());
+ assertEquals(Level.TRACE, ESLoggerFactory.getRootLogger().getLevel());
settings.applySettings(Settings.builder().build());
assertEquals(property, ESLoggerFactory.getRootLogger().getLevel());
settings.applySettings(Settings.builder().put("logger.test", "TRACE").build());
- assertEquals("TRACE", ESLoggerFactory.getLogger("test").getLevel());
+ assertEquals(Level.TRACE, ESLoggerFactory.getLogger("test").getLevel());
settings.applySettings(Settings.builder().build());
- assertEquals(testLevel, ESLoggerFactory.getLogger("test").getLevel());
+ assertEquals(property, ESLoggerFactory.getLogger("test").getLevel());
} finally {
- ESLoggerFactory.getRootLogger().setLevel(level);
- ESLoggerFactory.getLogger("test").setLevel(testLevel);
+ Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
+ Loggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel);
}
}
public void testFallbackToLoggerLevel() {
- final String level = ESLoggerFactory.getRootLogger().getLevel();
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
try {
- ClusterSettings settings = new ClusterSettings(Settings.builder().put("logger.level", "ERROR").build(),
- ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ ClusterSettings settings =
+ new ClusterSettings(Settings.builder().put("logger.level", "ERROR").build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
assertEquals(level, ESLoggerFactory.getRootLogger().getLevel());
settings.applySettings(Settings.builder().put("logger._root", "TRACE").build());
- assertEquals("TRACE", ESLoggerFactory.getRootLogger().getLevel());
+ assertEquals(Level.TRACE, ESLoggerFactory.getRootLogger().getLevel());
settings.applySettings(Settings.builder().build()); // here we fall back to 'logger.level' which is our default.
- assertEquals("ERROR", ESLoggerFactory.getRootLogger().getLevel());
+ assertEquals(Level.ERROR, ESLoggerFactory.getRootLogger().getLevel());
} finally {
- ESLoggerFactory.getRootLogger().setLevel(level);
+ Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
}
}
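The ScopedSettingsTests changes reflect the Log4j 2 migration: levels are now org.apache.logging.log4j.Level constants, and they are applied through configuration helpers rather than a setLevel(String) call on the logger itself. A sketch of the underlying Log4j 2 calls (requires log4j-core on the classpath; the project's own Loggers.setLevel wrapper may do more):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.config.Configurator;

    final class LevelUpdate {
        public static void main(String[] args) {
            // In Log4j 2 the level lives in the configuration, so updates go
            // through Configurator rather than the Logger instance.
            Configurator.setLevel("test", Level.TRACE);   // named logger
            Configurator.setRootLevel(Level.ERROR);       // root logger
            assert LogManager.getLogger("test").getLevel() == Level.TRACE;
        }
    }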
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
index b1504672f4..6ec9093536 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
@@ -22,6 +22,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.test.ESTestCase;
import java.util.Arrays;
@@ -32,6 +33,7 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class SettingTests extends ESTestCase {
@@ -47,11 +49,11 @@ public class SettingTests extends ESTestCase {
Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope);
assertFalse(byteSizeValueSetting.isGroupSetting());
ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
- assertEquals(byteSizeValue.bytes(), 1024);
+ assertEquals(byteSizeValue.getBytes(), 1024);
byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope);
byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
- assertEquals(byteSizeValue.bytes(), 2048);
+ assertEquals(byteSizeValue.getBytes(), 2048);
AtomicReference<ByteSizeValue> value = new AtomicReference<>(null);
@@ -68,6 +70,44 @@ public class SettingTests extends ESTestCase {
assertEquals(new ByteSizeValue(12), value.get());
}
+ public void testMemorySize() {
+ Setting<ByteSizeValue> memorySizeValueSetting = Setting.memorySizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic,
+ Property.NodeScope);
+
+ assertFalse(memorySizeValueSetting.isGroupSetting());
+ ByteSizeValue memorySizeValue = memorySizeValueSetting.get(Settings.EMPTY);
+ assertEquals(memorySizeValue.getBytes(), 1024);
+
+ memorySizeValueSetting = Setting.memorySizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope);
+ memorySizeValue = memorySizeValueSetting.get(Settings.EMPTY);
+ assertEquals(memorySizeValue.getBytes(), 2048);
+
+ memorySizeValueSetting = Setting.memorySizeSetting("a.byte.size", "50%", Property.Dynamic, Property.NodeScope);
+ assertFalse(memorySizeValueSetting.isGroupSetting());
+ memorySizeValue = memorySizeValueSetting.get(Settings.EMPTY);
+ assertEquals(memorySizeValue.getBytes(), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.5, 1.0);
+
+ memorySizeValueSetting = Setting.memorySizeSetting("a.byte.size", s -> "25%", Property.Dynamic, Property.NodeScope);
+ memorySizeValue = memorySizeValueSetting.get(Settings.EMPTY);
+ assertEquals(memorySizeValue.getBytes(), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.25, 1.0);
+
+ AtomicReference<ByteSizeValue> value = new AtomicReference<>(null);
+ ClusterSettings.SettingUpdater<ByteSizeValue> settingUpdater = memorySizeValueSetting.newUpdater(value::set, logger);
+ try {
+ settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY);
+ fail("no unit");
+ } catch (IllegalArgumentException ex) {
+ assertEquals("failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized",
+ ex.getMessage());
+ }
+
+ assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY));
+ assertEquals(new ByteSizeValue(12), value.get());
+
+ assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "20%").build(), Settings.EMPTY));
+ assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2)), value.get());
+ }
+
public void testSimpleUpdate() {
Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope);
AtomicReference<Boolean> atomicBoolean = new AtomicReference<>(null);
@@ -478,4 +518,16 @@ public class SettingTests extends ESTestCase {
assertThat(ex.getMessage(), containsString("properties cannot be null for setting"));
}
}
+
+ public void testTimeValue() {
+ final TimeValue random = TimeValue.parseTimeValue(randomTimeValue(), "test");
+
+ Setting<TimeValue> setting = Setting.timeSetting("foo", random);
+ assertThat(setting.get(Settings.EMPTY), equalTo(random));
+
+ final int factor = randomIntBetween(1, 10);
+ setting = Setting.timeSetting("foo", (s) -> TimeValue.timeValueMillis(random.getMillis() * factor), TimeValue.ZERO);
+ assertThat(setting.get(Settings.builder().put("foo", "12h").build()), equalTo(TimeValue.timeValueHours(12)));
+ assertThat(setting.get(Settings.EMPTY).getMillis(), equalTo(random.getMillis() * factor));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java
index 692134916e..dc0545624d 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java
@@ -144,14 +144,9 @@ public class SettingsModuleTests extends ModuleTestCase {
{
Settings settings = Settings.builder().put("logger._root", "BOOM").put("logger.transport", "WOW").build();
- try {
- new SettingsModule(settings);
- fail();
- } catch (IllegalArgumentException ex) {
- assertEquals("No enum constant org.elasticsearch.common.logging.ESLoggerFactory.LogLevel.BOOM", ex.getMessage());
- }
+ IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settings));
+ assertEquals("Unknown level constant [BOOM].", ex.getMessage());
}
-
}
public void testRegisterSettingsFilter() {
diff --git a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
index ab6d281894..719313d1c8 100644
--- a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
@@ -19,20 +19,23 @@
package org.elasticsearch.common.unit;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
+import java.io.IOException;
+
import static org.elasticsearch.common.unit.ByteSizeUnit.BYTES;
import static org.elasticsearch.common.unit.ByteSizeUnit.GB;
import static org.elasticsearch.common.unit.ByteSizeUnit.KB;
import static org.elasticsearch.common.unit.ByteSizeUnit.MB;
import static org.elasticsearch.common.unit.ByteSizeUnit.PB;
import static org.elasticsearch.common.unit.ByteSizeUnit.TB;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
-/**
- *
- */
public class ByteSizeUnitTests extends ESTestCase {
+
public void testBytes() {
assertThat(BYTES.toBytes(1), equalTo(1L));
assertThat(BYTES.toKB(1024), equalTo(1L));
@@ -77,4 +80,23 @@ public class ByteSizeUnitTests extends ESTestCase {
assertThat(PB.toTB(1), equalTo(1024L));
assertThat(PB.toPB(1), equalTo(1L));
}
+
+ public void testSerialization() throws IOException {
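+ // round-trip each unit through the stream protocol and expect the identical constant back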
+ for (ByteSizeUnit unit : ByteSizeUnit.values()) {
+ try (BytesStreamOutput out = new BytesStreamOutput()) {
+ unit.writeTo(out);
+
+ try (StreamInput in = out.bytes().streamInput()) {
+ ByteSizeUnit deserialized = ByteSizeUnit.readFrom(in);
+ assertEquals(unit, deserialized);
+ }
+ }
+ }
+ }
+
+ public void testFromUnknownId() throws IOException {
+ final byte randomId = (byte) randomIntBetween(ByteSizeUnit.values().length + 1, 100);
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ByteSizeUnit.fromId(randomId));
+ assertThat(e.getMessage(), containsString("No byte size unit found for id [" + String.valueOf(randomId) + "]"));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
index b075e9d56d..5296e226fa 100644
--- a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
+++ b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
@@ -20,9 +20,13 @@
package org.elasticsearch.common.unit;
import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.MatcherAssert;
+import java.io.IOException;
+
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
@@ -32,24 +36,24 @@ import static org.hamcrest.Matchers.is;
*/
public class ByteSizeValueTests extends ESTestCase {
public void testActualPeta() {
- MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).bytes(), equalTo(4503599627370496L));
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).getBytes(), equalTo(4503599627370496L));
}
public void testActualTera() {
- MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).bytes(), equalTo(4398046511104L));
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).getBytes(), equalTo(4398046511104L));
}
public void testActual() {
- MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).bytes(), equalTo(4294967296L));
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).getBytes(), equalTo(4294967296L));
}
public void testSimple() {
- assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).bytes()));
- assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).kb()));
- assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).mb()));
- assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).gb()));
- assertThat(ByteSizeUnit.TB.toTB(10), is(new ByteSizeValue(10, ByteSizeUnit.TB).tb()));
- assertThat(ByteSizeUnit.PB.toPB(10), is(new ByteSizeValue(10, ByteSizeUnit.PB).pb()));
+ assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).getBytes()));
+ assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).getKb()));
+ assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).getMb()));
+ assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).getGb()));
+ assertThat(ByteSizeUnit.TB.toTB(10), is(new ByteSizeValue(10, ByteSizeUnit.TB).getTb()));
+ assertThat(ByteSizeUnit.PB.toPB(10), is(new ByteSizeValue(10, ByteSizeUnit.PB).getPb()));
}
public void testEquality() {
@@ -165,4 +169,15 @@ public class ByteSizeValueTests extends ESTestCase {
assertThat(e.getMessage(), containsString("failed to parse setting [test]"));
}
}
+
+ public void testSerialization() throws IOException {
+ ByteSizeValue byteSizeValue = new ByteSizeValue(randomPositiveLong(), randomFrom(ByteSizeUnit.values()));
+ try (BytesStreamOutput out = new BytesStreamOutput()) {
+ byteSizeValue.writeTo(out);
+ try (StreamInput in = out.bytes().streamInput()) {
+ ByteSizeValue deserializedByteSizeValue = new ByteSizeValue(in);
+ assertEquals(byteSizeValue, deserializedByteSizeValue);
+ }
+ }
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java b/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java
index b4bc99e964..8fabbcc60a 100644
--- a/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java
+++ b/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java
@@ -18,49 +18,45 @@
*/
package org.elasticsearch.common.util;
-import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
-import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
public class ExtensionPointTests extends ESTestCase {
public void testClassSet() {
- final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
- allocationDeciders.registerExtension(TestAllocationDecider.class);
+ final ExtensionPoint.ClassSet<TestBaseClass> allocationDeciders = new ExtensionPoint.ClassSet<>("test_class", TestBaseClass.class, Consumer.class);
+ allocationDeciders.registerExtension(TestImpl.class);
Injector injector = new ModulesBuilder().add(new Module() {
@Override
public void configure(Binder binder) {
- binder.bind(Settings.class).toInstance(Settings.EMPTY);
- binder.bind(Consumer.class).asEagerSingleton();
allocationDeciders.bind(binder);
}
}).createInjector();
- assertEquals(1, TestAllocationDecider.instances.get());
+ assertEquals(1, TestImpl.instances.get());
}
+ public static class TestBaseClass {}
+
public static class Consumer {
@Inject
- public Consumer(Set<AllocationDecider> deciders, TestAllocationDecider other) {
- // we require the TestAllocationDecider more than once to ensure it's bound as a singleton
+ public Consumer(Set<TestBaseClass> deciders, TestImpl other) {
+ // we require TestImpl more than once to ensure it's bound as a singleton
}
}
- public static class TestAllocationDecider extends AllocationDecider {
+ public static class TestImpl extends TestBaseClass {
static final AtomicInteger instances = new AtomicInteger(0);
@Inject
- public TestAllocationDecider(Settings settings) {
- super(settings);
+ public TestImpl() {
instances.incrementAndGet();
}
}
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java
index 02adb78319..3f712c44d3 100644
--- a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java
@@ -18,11 +18,10 @@
*/
package org.elasticsearch.common.util.concurrent;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.component.Lifecycle;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.test.ESTestCase;
-
import org.mockito.InOrder;
import java.util.concurrent.Callable;
@@ -38,7 +37,7 @@ import static org.mockito.Mockito.when;
*/
public class AbstractLifecycleRunnableTests extends ESTestCase {
private final Lifecycle lifecycle = mock(Lifecycle.class);
- private final ESLogger logger = mock(ESLogger.class);
+ private final Logger logger = mock(Logger.class);
public void testDoRunOnlyRunsWhenNotStoppedOrClosed() throws Exception {
Callable<?> runCallable = mock(Callable.class);
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java
new file mode 100644
index 0000000000..3bf8e450bd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+
+public class AsyncIOProcessorTests extends ESTestCase {
+
+ public void testPut() throws InterruptedException {
+ boolean blockInternal = randomBoolean();
+ AtomicInteger received = new AtomicInteger(0);
+ AsyncIOProcessor<Object> processor = new AsyncIOProcessor<Object>(logger, scaledRandomIntBetween(1, 2024)) {
+ @Override
+ protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException {
+ if (blockInternal) {
+ synchronized (this) {
+ for (Tuple<Object, Consumer<Exception>> c : candidates) {
+ received.incrementAndGet();
+ }
+ }
+ } else {
+ received.addAndGet(candidates.size());
+ }
+ }
+ };
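+ // each put() acquires a permit that its callback releases; re-acquiring all permits below proves every callback ran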
+ Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
+ final int count = randomIntBetween(1000, 20000);
+ Thread[] thread = new Thread[randomIntBetween(3, 10)];
+ CountDownLatch latch = new CountDownLatch(thread.length);
+ for (int i = 0; i < thread.length; i++) {
+ thread[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.countDown();
+ latch.await();
+ for (int i = 0; i < count; i++) {
+ semaphore.acquire();
+ processor.put(new Object(), (ex) -> semaphore.release());
+ }
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ };
+ thread[i].start();
+ }
+
+ for (int i = 0; i < thread.length; i++) {
+ thread[i].join();
+ }
+ assertTrue(semaphore.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS));
+ assertEquals(count * thread.length, received.get());
+ }
+
+ public void testRandomFail() throws InterruptedException {
+ AtomicInteger received = new AtomicInteger(0);
+ AtomicInteger failed = new AtomicInteger(0);
+ AtomicInteger actualFailed = new AtomicInteger(0);
+ AsyncIOProcessor<Object> processor = new AsyncIOProcessor<Object>(logger, scaledRandomIntBetween(1, 2024)) {
+ @Override
+ protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException {
+ received.addAndGet(candidates.size());
+ if (randomBoolean()) {
+ failed.addAndGet(candidates.size());
+ if (randomBoolean()) {
+ throw new IOException();
+ } else {
+ throw new RuntimeException();
+ }
+ }
+ }
+ };
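+ // callbacks must observe a non-null exception exactly for the batches whose write failed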
+ Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
+ final int count = randomIntBetween(1000, 20000);
+ Thread[] thread = new Thread[randomIntBetween(3, 10)];
+ CountDownLatch latch = new CountDownLatch(thread.length);
+ for (int i = 0; i < thread.length; i++) {
+ thread[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.countDown();
+ latch.await();
+ for (int i = 0; i < count; i++) {
+ semaphore.acquire();
+ processor.put(new Object(), (ex) -> {
+ if (ex != null) {
+ actualFailed.incrementAndGet();
+ }
+ semaphore.release();
+ });
+ }
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ };
+ thread[i].start();
+ }
+
+ for (int i = 0; i < thread.length; i++) {
+ thread[i].join();
+ }
+ assertTrue(semaphore.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS));
+ assertEquals(count * thread.length, received.get());
+ assertEquals(actualFailed.get(), failed.get());
+ }
+
+ public void testConsumerCanThrowExceptions() {
+ AtomicInteger received = new AtomicInteger(0);
+ AtomicInteger notified = new AtomicInteger(0);
+
+ AsyncIOProcessor<Object> processor = new AsyncIOProcessor<Object>(logger, scaledRandomIntBetween(1, 2024)) {
+ @Override
+ protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException {
+ received.addAndGet(candidates.size());
+ }
+ };
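+ // a consumer that throws must not stop the processor from notifying the remaining consumers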
+ processor.put(new Object(), (e) -> { notified.incrementAndGet(); throw new RuntimeException(); });
+ processor.put(new Object(), (e) -> { notified.incrementAndGet(); throw new RuntimeException(); });
+ assertEquals(2, notified.get());
+ assertEquals(2, received.get());
+ }
+
+ public void testNullArguments() {
+ AsyncIOProcessor<Object> processor = new AsyncIOProcessor<Object>(logger, scaledRandomIntBetween(1, 2024)) {
+ @Override
+ protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException {
+ }
+ };
+
+ expectThrows(NullPointerException.class, () -> processor.put(null, (e) -> {}));
+ expectThrows(NullPointerException.class, () -> processor.put(new Object(), null));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
index 461428581c..227918ff97 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
@@ -19,17 +19,669 @@
package org.elasticsearch.common.xcontent;
+import com.fasterxml.jackson.core.JsonGenerationException;
+import com.fasterxml.jackson.core.JsonGenerator;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.test.ESTestCase;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.Instant;
+import org.joda.time.ReadableInstant;
+import org.joda.time.format.DateTimeFormatter;
+import org.joda.time.format.ISODateTimeFormat;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Calendar;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.Collections.singletonMap;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
public abstract class BaseXContentTestCase extends ESTestCase {
- public abstract XContentType xcontentType();
+ protected abstract XContentType xcontentType();
+
+ private XContentBuilder builder() throws IOException {
+ return XContentBuilder.builder(xcontentType().xContent());
+ }
+
+ public void testContentType() throws IOException {
+ assertThat(builder().contentType(), equalTo(xcontentType()));
+ }
+
+ public void testStartEndObject() throws IOException {
+ expectUnclosedException(() -> builder().startObject().bytes());
+ expectUnclosedException(() -> builder().startObject().close());
+ expectUnclosedException(() -> builder().startObject().string());
+
+ expectObjectException(() -> builder().endObject().bytes());
+ expectObjectException(() -> builder().endObject().close());
+ expectObjectException(() -> builder().endObject().string());
+
+ expectValueException(() -> builder().startObject("foo").endObject());
+ expectNonNullFieldException(() -> builder().startObject().startObject(null));
+
+ assertResult("{}", () -> builder().startObject().endObject());
+ assertResult("{'foo':{}}", () -> builder().startObject().startObject("foo").endObject().endObject());
+
+ assertResult("{'foo':{'bar':{}}}", () -> builder()
+ .startObject()
+ .startObject("foo")
+ .startObject("bar")
+ .endObject()
+ .endObject()
+ .endObject());
+ }
+
+ public void testStartEndArray() throws IOException {
+ expectUnclosedException(() -> builder().startArray().bytes());
+ expectUnclosedException(() -> builder().startArray().close());
+ expectUnclosedException(() -> builder().startArray().string());
+
+ expectArrayException(() -> builder().endArray().bytes());
+ expectArrayException(() -> builder().endArray().close());
+ expectArrayException(() -> builder().endArray().string());
+
+ expectValueException(() -> builder().startArray("foo").endObject());
+ expectFieldException(() -> builder().startObject().startArray().endArray().endObject());
+ expectNonNullFieldException(() -> builder().startObject().startArray(null).endArray().endObject());
+
+ assertResult("{'foo':[]}", () -> builder().startObject().startArray("foo").endArray().endObject());
+ assertResult("{'foo':[1,2,3]}", () -> builder()
+ .startObject()
+ .startArray("foo")
+ .value(1)
+ .value(2)
+ .value(3)
+ .endArray()
+ .endObject());
+ }
+
+ public void testField() throws IOException {
+ expectValueException(() -> builder().field("foo").bytes());
+ expectNonNullFieldException(() -> builder().field(null).bytes());
+ expectUnclosedException(() -> builder().startObject().field("foo").bytes());
+
+ assertResult("{'foo':'bar'}", () -> builder().startObject().field("foo").value("bar").endObject());
+ }
+
+ public void testNullField() throws IOException {
+ expectValueException(() -> builder().nullField("foo").bytes());
+ expectNonNullFieldException(() -> builder().nullField(null).bytes());
+ expectUnclosedException(() -> builder().startObject().nullField("foo").bytes());
+
+ assertResult("{'foo':null}", () -> builder().startObject().nullField("foo").endObject());
+ }
+
+ public void testNullValue() throws IOException {
+ assertResult("{'foo':null}", () -> builder().startObject().field("foo").nullValue().endObject());
+ }
+
+ public void testBooleans() throws IOException {
+ assertResult("{'boolean':null}", () -> builder().startObject().field("boolean", (Boolean) null).endObject());
+ assertResult("{'boolean':true}", () -> builder().startObject().field("boolean", Boolean.TRUE).endObject());
+ assertResult("{'boolean':false}", () -> builder().startObject().field("boolean", Boolean.FALSE).endObject());
+ assertResult("{'boolean':[true,false,true]}", () -> builder().startObject().array("boolean", true, false, true).endObject());
+ assertResult("{'boolean':[false,true]}", () -> builder().startObject().array("boolean", new boolean[]{false, true}).endObject());
+ assertResult("{'boolean':null}", () -> builder().startObject().array("boolean", (boolean[]) null).endObject());
+ assertResult("{'boolean':[]}", () -> builder().startObject().array("boolean", new boolean[]{}).endObject());
+ assertResult("{'boolean':null}", () -> builder().startObject().field("boolean").value((Boolean) null).endObject());
+ assertResult("{'boolean':true}", () -> builder().startObject().field("boolean").value(Boolean.TRUE).endObject());
+ assertResult("{'boolean':false}", () -> builder().startObject().field("boolean").value(Boolean.FALSE).endObject());
+ }
+
+ public void testBytes() throws IOException {
+ assertResult("{'byte':null}", () -> builder().startObject().field("byte", (Byte) null).endObject());
+ assertResult("{'byte':0}", () -> builder().startObject().field("byte", (byte) 0).endObject());
+ assertResult("{'byte':1}", () -> builder().startObject().field("byte", (byte) 1).endObject());
+ assertResult("{'byte':null}", () -> builder().startObject().field("byte").value((Byte) null).endObject());
+ assertResult("{'byte':0}", () -> builder().startObject().field("byte").value((byte) 0).endObject());
+ assertResult("{'byte':1}", () -> builder().startObject().field("byte").value((byte) 1).endObject());
+ }
+
+ public void testDoubles() throws IOException {
+ assertResult("{'double':null}", () -> builder().startObject().field("double", (Double) null).endObject());
+ assertResult("{'double':42.5}", () -> builder().startObject().field("double", Double.valueOf(42.5)).endObject());
+ assertResult("{'double':1.2}", () -> builder().startObject().field("double", 1.2).endObject());
+ assertResult("{'double':[42.0,43.0,45]}", () -> builder().startObject().array("double", 42.0, 43.0, 45).endObject());
+ assertResult("{'double':null}", () -> builder().startObject().array("double", (double[]) null).endObject());
+ assertResult("{'double':[]}", () -> builder().startObject().array("double", new double[]{}).endObject());
+ assertResult("{'double':null}", () -> builder().startObject().field("double").value((Double) null).endObject());
+ assertResult("{'double':0.001}", () -> builder().startObject().field("double").value(0.001).endObject());
+ assertResult("{'double':[1.7976931348623157E308,4.9E-324]}", () -> builder()
+ .startObject()
+ .array("double", new double[]{Double.MAX_VALUE, Double.MIN_VALUE})
+ .endObject());
+ }
+
+ public void testFloats() throws IOException {
+ assertResult("{'float':null}", () -> builder().startObject().field("float", (Float) null).endObject());
+ assertResult("{'float':42.5}", () -> builder().startObject().field("float", Float.valueOf(42.5f)).endObject());
+ assertResult("{'float':1.2}", () -> builder().startObject().field("float", 1.2f).endObject());
+ assertResult("{'float':null}", () -> builder().startObject().array("float", (float[]) null).endObject());
+ assertResult("{'float':[]}", () -> builder().startObject().array("float", new float[]{}).endObject());
+ assertResult("{'float':null}", () -> builder().startObject().field("float").value((Float) null).endObject());
+ assertResult("{'float':9.9E-7}", () -> builder().startObject().field("float").value(0.00000099f).endObject());
+ assertResult("{'float':[42.0,43.0,45.666668]}", () -> builder()
+ .startObject()
+ .array("float", 42.0f, 43.0f, 45.66666667f)
+ .endObject());
+ assertResult("{'float':[3.4028235E38,1.4E-45]}", () -> builder()
+ .startObject()
+ .array("float", new float[]{Float.MAX_VALUE, Float.MIN_VALUE})
+ .endObject());
+ }
+
+ public void testIntegers() throws IOException {
+ assertResult("{'integer':null}", () -> builder().startObject().field("integer", (Integer) null).endObject());
+ assertResult("{'integer':42}", () -> builder().startObject().field("integer", Integer.valueOf(42)).endObject());
+ assertResult("{'integer':3}", () -> builder().startObject().field("integer", 3).endObject());
+ assertResult("{'integer':[1,3,5,7,11]}", () -> builder().startObject().array("integer", 1, 3, 5, 7, 11).endObject());
+ assertResult("{'integer':null}", () -> builder().startObject().array("integer", (int[]) null).endObject());
+ assertResult("{'integer':[]}", () -> builder().startObject().array("integer", new int[]{}).endObject());
+ assertResult("{'integer':null}", () -> builder().startObject().field("integer").value((Integer) null).endObject());
+ assertResult("{'integer':42}", () -> builder().startObject().field("integer").value(42).endObject());
+ assertResult("{'integer':[2147483647,-2147483648]}", () -> builder()
+ .startObject()
+ .array("integer", new int[]{Integer.MAX_VALUE, Integer.MIN_VALUE})
+ .endObject());
+ }
+
+ public void testLongs() throws IOException {
+ assertResult("{'long':null}", () -> builder().startObject().field("long", (Long) null).endObject());
+ assertResult("{'long':42}", () -> builder().startObject().field("long", Long.valueOf(42L)).endObject());
+ assertResult("{'long':9223372036854775807}", () -> builder().startObject().field("long", 9_223_372_036_854_775_807L).endObject());
+ assertResult("{'long':[1,3,5,7,11]}", () -> builder().startObject().array("long", 1L, 3L, 5L, 7L, 11L).endObject());
+ assertResult("{'long':null}", () -> builder().startObject().array("long", (long[]) null).endObject());
+ assertResult("{'long':[]}", () -> builder().startObject().array("long", new long[]{}).endObject());
+ assertResult("{'long':null}", () -> builder().startObject().field("long").value((Long) null).endObject());
+ assertResult("{'long':42}", () -> builder().startObject().field("long").value(42).endObject());
+ assertResult("{'long':[2147483647,-2147483648]}", () -> builder()
+ .startObject()
+ .array("long", new long[]{Integer.MAX_VALUE, Integer.MIN_VALUE})
+ .endObject());
+ }
+
+ public void testShorts() throws IOException {
+ assertResult("{'short':null}", () -> builder().startObject().field("short", (Short) null).endObject());
+ assertResult("{'short':5000}", () -> builder().startObject().field("short", Short.valueOf((short) 5000)).endObject());
+ assertResult("{'short':null}", () -> builder().startObject().array("short", (short[]) null).endObject());
+ assertResult("{'short':[]}", () -> builder().startObject().array("short", new short[]{}).endObject());
+ assertResult("{'short':null}", () -> builder().startObject().field("short").value((Short) null).endObject());
+ assertResult("{'short':42}", () -> builder().startObject().field("short").value((short) 42).endObject());
+ assertResult("{'short':[1,3,5,7,11]}", () -> builder()
+ .startObject()
+ .array("short", (short) 1, (short) 3, (short) 5, (short) 7, (short) 11)
+ .endObject());
+ assertResult("{'short':[32767,-32768]}", () -> builder()
+ .startObject()
+ .array("short", new short[]{Short.MAX_VALUE, Short.MIN_VALUE})
+ .endObject());
+ }
+
+ public void testStrings() throws IOException {
+ assertResult("{'string':null}", () -> builder().startObject().field("string", (String) null).endObject());
+ assertResult("{'string':'value'}", () -> builder().startObject().field("string", "value").endObject());
+ assertResult("{'string':''}", () -> builder().startObject().field("string", "").endObject());
+ assertResult("{'string':null}", () -> builder().startObject().array("string", (String[]) null).endObject());
+ assertResult("{'string':[]}", () -> builder().startObject().array("string", Strings.EMPTY_ARRAY).endObject());
+ assertResult("{'string':null}", () -> builder().startObject().field("string").value((String) null).endObject());
+ assertResult("{'string':'42'}", () -> builder().startObject().field("string").value("42").endObject());
+ assertResult("{'string':['a','b','c','d']}", () -> builder()
+ .startObject()
+ .array("string", "a", "b", "c", "d")
+ .endObject());
+ }
+
+ public void testBinaryField() throws Exception {
+ assertResult("{'binary':null}", () -> builder().startObject().field("binary", (byte[]) null).endObject());
+
+ final byte[] randomBytes = randomBytes();
+ BytesReference bytes = builder().startObject().field("binary", randomBytes).endObject().bytes();
+
+ XContentParser parser = xcontentType().xContent().createParser(bytes);
+ assertSame(parser.nextToken(), Token.START_OBJECT);
+ assertSame(parser.nextToken(), Token.FIELD_NAME);
+ assertEquals(parser.currentName(), "binary");
+ assertTrue(parser.nextToken().isValue());
+ assertArrayEquals(randomBytes, parser.binaryValue());
+ assertSame(parser.nextToken(), Token.END_OBJECT);
+ assertNull(parser.nextToken());
+ }
+
+ public void testBinaryValue() throws Exception {
+ assertResult("{'binary':null}", () -> builder().startObject().field("binary").value((byte[]) null).endObject());
+
+ final byte[] randomBytes = randomBytes();
+ BytesReference bytes = builder().startObject().field("binary").value(randomBytes).endObject().bytes();
+
+ XContentParser parser = xcontentType().xContent().createParser(bytes);
+ assertSame(parser.nextToken(), Token.START_OBJECT);
+ assertSame(parser.nextToken(), Token.FIELD_NAME);
+ assertEquals(parser.currentName(), "binary");
+ assertTrue(parser.nextToken().isValue());
+ assertArrayEquals(randomBytes, parser.binaryValue());
+ assertSame(parser.nextToken(), Token.END_OBJECT);
+ assertNull(parser.nextToken());
+ }
+
+ public void testBinaryValueWithOffsetLength() throws Exception {
+ assertResult("{'binary':null}", () -> builder().startObject().field("binary").value(null, 0, 0).endObject());
+
+ final byte[] randomBytes = randomBytes();
+ final int offset = randomIntBetween(0, randomBytes.length - 1);
+ final int length = randomIntBetween(1, Math.max(1, randomBytes.length - offset - 1));
+
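+ // write a random slice of the array and expect the parser to hand back exactly that slice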
+ XContentBuilder builder = builder().startObject();
+ if (randomBoolean()) {
+ builder.field("bin", randomBytes, offset, length);
+ } else {
+ builder.field("bin").value(randomBytes, offset, length);
+ }
+ builder.endObject();
+
+ XContentParser parser = xcontentType().xContent().createParser(builder.bytes());
+ assertSame(parser.nextToken(), Token.START_OBJECT);
+ assertSame(parser.nextToken(), Token.FIELD_NAME);
+ assertEquals(parser.currentName(), "bin");
+ assertTrue(parser.nextToken().isValue());
+ assertArrayEquals(Arrays.copyOfRange(randomBytes, offset, offset + length), parser.binaryValue());
+ assertSame(parser.nextToken(), Token.END_OBJECT);
+ assertNull(parser.nextToken());
+ }
+
+ public void testBinaryUTF8() throws Exception {
+ assertResult("{'utf8':null}", () -> builder().startObject().utf8Field("utf8", null).endObject());
+
+ final BytesRef randomBytesRef = new BytesRef(randomBytes());
+ XContentBuilder builder = builder().startObject();
+ if (randomBoolean()) {
+ builder.utf8Field("utf8", randomBytesRef);
+ } else {
+ builder.field("utf8").utf8Value(randomBytesRef);
+ }
+ builder.endObject();
+
+ XContentParser parser = xcontentType().xContent().createParser(builder.bytes());
+ assertSame(parser.nextToken(), Token.START_OBJECT);
+ assertSame(parser.nextToken(), Token.FIELD_NAME);
+ assertEquals(parser.currentName(), "utf8");
+ assertTrue(parser.nextToken().isValue());
+ assertThat(parser.utf8Bytes().utf8ToString(), equalTo(randomBytesRef.utf8ToString()));
+ assertSame(parser.nextToken(), Token.END_OBJECT);
+ assertNull(parser.nextToken());
+ }
+
+ public void testText() throws Exception {
+ assertResult("{'text':null}", () -> builder().startObject().field("text", (Text) null).endObject());
+ assertResult("{'text':''}", () -> builder().startObject().field("text", new Text("")).endObject());
+ assertResult("{'text':'foo bar'}", () -> builder().startObject().field("text", new Text("foo bar")).endObject());
+
+ final BytesReference random = new BytesArray(randomBytes());
+ XContentBuilder builder = builder().startObject().field("text", new Text(random)).endObject();
+
+ XContentParser parser = xcontentType().xContent().createParser(builder.bytes());
+ assertSame(parser.nextToken(), Token.START_OBJECT);
+ assertSame(parser.nextToken(), Token.FIELD_NAME);
+ assertEquals(parser.currentName(), "text");
+ assertTrue(parser.nextToken().isValue());
+ assertThat(parser.utf8Bytes().utf8ToString(), equalTo(random.utf8ToString()));
+ assertSame(parser.nextToken(), Token.END_OBJECT);
+ assertNull(parser.nextToken());
+ }
+
+ public void testReadableInstant() throws Exception {
+ assertResult("{'instant':null}", () -> builder().startObject().field("instant", (ReadableInstant) null).endObject());
+ assertResult("{'instant':null}", () -> builder().startObject().field("instant").value((ReadableInstant) null).endObject());
+
+ final DateTime t1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC);
+
+ String expected = "{'t1':'2016-01-01T00:00:00.000Z'}";
+ assertResult(expected, () -> builder().startObject().field("t1", t1).endObject());
+ assertResult(expected, () -> builder().startObject().field("t1").value(t1).endObject());
+
+ final DateTime t2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC);
+
+ expected = "{'t2':'2016-12-25T07:59:42.213Z'}";
+ assertResult(expected, () -> builder().startObject().field("t2", t2).endObject());
+ assertResult(expected, () -> builder().startObject().field("t2").value(t2).endObject());
+
+ final DateTimeFormatter formatter = randomFrom(ISODateTimeFormat.basicDate(), ISODateTimeFormat.dateTimeNoMillis());
+ final DateTime t3 = DateTime.now();
+
+ expected = "{'t3':'" + formatter.print(t3) + "'}";
+ assertResult(expected, () -> builder().startObject().field("t3", t3, formatter).endObject());
+ assertResult(expected, () -> builder().startObject().field("t3").value(t3, formatter).endObject());
+
+ final DateTime t4 = new DateTime(randomDateTimeZone());
+
+ expected = "{'t4':'" + formatter.print(t4) + "'}";
+ assertResult(expected, () -> builder().startObject().field("t4", t4, formatter).endObject());
+ assertResult(expected, () -> builder().startObject().field("t4").value(t4, formatter).endObject());
+
+ long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00
+ final DateTime t5 = new DateTime(date, randomDateTimeZone());
+
+ expected = "{'t5':'" + XContentBuilder.DEFAULT_DATE_PRINTER.print(t5) + "'}";
+ assertResult(expected, () -> builder().startObject().field("t5", t5).endObject());
+ assertResult(expected, () -> builder().startObject().field("t5").value(t5).endObject());
+
+ expected = "{'t5':'" + formatter.print(t5) + "'}";
+ assertResult(expected, () -> builder().startObject().field("t5", t5, formatter).endObject());
+ assertResult(expected, () -> builder().startObject().field("t5").value(t5, formatter).endObject());
+
+ Instant i1 = new Instant(1451606400000L); // 2016-01-01T00:00:00.000Z
+ expected = "{'i1':'2016-01-01T00:00:00.000Z'}";
+ assertResult(expected, () -> builder().startObject().field("i1", i1).endObject());
+ assertResult(expected, () -> builder().startObject().field("i1").value(i1).endObject());
+
+ Instant i2 = new Instant(1482652782213L); // 2016-12-25T07:59:42.213Z
+ expected = "{'i2':'" + formatter.print(i2) + "'}";
+ assertResult(expected, () -> builder().startObject().field("i2", i2, formatter).endObject());
+ assertResult(expected, () -> builder().startObject().field("i2").value(i2, formatter).endObject());
+
+ expectNonNullFormatterException(() -> builder().startObject().field("t3", t3, null).endObject());
+ expectNonNullFormatterException(() -> builder().startObject().field("t3").value(t3, null).endObject());
+ }
+
+ public void testDate() throws Exception {
+ assertResult("{'date':null}", () -> builder().startObject().field("date", (Date) null).endObject());
+ assertResult("{'date':null}", () -> builder().startObject().field("date").value((Date) null).endObject());
+
+ final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate();
+ assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1).endObject());
+ assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").value(d1).endObject());
+
+ final Date d2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC).toDate();
+ assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().field("d2", d2).endObject());
+ assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().field("d2").value(d2).endObject());
+
+ final DateTimeFormatter formatter = randomFrom(ISODateTimeFormat.basicDate(), ISODateTimeFormat.dateTimeNoMillis());
+ final Date d3 = DateTime.now().toDate();
+
+ String expected = "{'d3':'" + formatter.print(d3.getTime()) + "'}";
+ assertResult(expected, () -> builder().startObject().field("d3", d3, formatter).endObject());
+ assertResult(expected, () -> builder().startObject().field("d3").value(d3, formatter).endObject());
+
+ expectNonNullFormatterException(() -> builder().startObject().field("d3", d3, null).endObject());
+ expectNonNullFormatterException(() -> builder().startObject().field("d3").value(d3, null).endObject());
+ expectNonNullFormatterException(() -> builder().value(null, 1L));
+ }
+
+ public void testDateField() throws Exception {
+ final Date d = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate();
+
+ assertResult("{'date_in_millis':1451606400000}", () -> builder()
+ .startObject()
+ .dateField("date_in_millis", "date", d.getTime())
+ .endObject());
+ assertResult("{'date':'2016-01-01T00:00:00.000Z','date_in_millis':1451606400000}", () -> builder()
+ .humanReadable(true)
+ .startObject()
+ .dateField("date_in_millis", "date", d.getTime())
+ .endObject());
+ }
+
+ public void testCalendar() throws Exception {
+ Calendar calendar = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toCalendar(Locale.ROOT);
+ assertResult("{'calendar':'2016-01-01T00:00:00.000Z'}", () -> builder()
+ .startObject()
+ .field("calendar")
+ .value(calendar)
+ .endObject());
+ }
+
+ public void testGeoPoint() throws Exception {
+ assertResult("{'geo':null}", () -> builder().startObject().field("geo", (GeoPoint) null).endObject());
+ assertResult("{'geo':{'lat':52.4267578125,'lon':13.271484375}}", () -> builder()
+ .startObject()
+ . field("geo", GeoPoint.fromGeohash("u336q"))
+ .endObject());
+ assertResult("{'geo':{'lat':52.5201416015625,'lon':13.4033203125}}", () -> builder()
+ .startObject()
+ .field("geo")
+ .value(GeoPoint.fromGeohash("u33dc1"))
+ .endObject());
+ }
+
+ public void testLatLon() throws Exception {
+ final String expected = "{'latlon':{'lat':13.271484375,'lon':52.4267578125}}";
+ assertResult(expected, () -> builder().startObject().latlon("latlon", 13.271484375, 52.4267578125).endObject());
+ assertResult(expected, () -> builder().startObject().field("latlon").latlon(13.271484375, 52.4267578125).endObject());
+ }
+
+ public void testPath() throws Exception {
+ assertResult("{'path':null}", () -> builder().startObject().field("path", (Path) null).endObject());
+
+ final Path path = PathUtils.get("first", "second", "third");
+ final String expected = Constants.WINDOWS ? "{'path':'first\\\\second\\\\third'}" : "{'path':'first/second/third'}";
+ assertResult(expected, () -> builder().startObject().field("path", path).endObject());
+ }
+
+ public void testObjects() throws Exception {
+ Map<String, Object[]> objects = new HashMap<>();
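+ // each key is the expected JSON rendering of the corresponding array of values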
+ objects.put("{'objects':[false,true,false]}", new Object[]{false, true, false});
+ objects.put("{'objects':[1,1,2,3,5,8,13]}", new Object[]{(byte) 1, (byte) 1, (byte) 2, (byte) 3, (byte) 5, (byte) 8, (byte) 13});
+ objects.put("{'objects':[1.0,1.0,2.0,3.0,5.0,8.0,13.0]}", new Object[]{1.0d, 1.0d, 2.0d, 3.0d, 5.0d, 8.0d, 13.0d});
+ objects.put("{'objects':[1.0,1.0,2.0,3.0,5.0,8.0,13.0]}", new Object[]{1.0f, 1.0f, 2.0f, 3.0f, 5.0f, 8.0f, 13.0f});
+ objects.put("{'objects':[{'lat':45.759429931640625,'lon':4.8394775390625}]}", new Object[]{GeoPoint.fromGeohash("u05kq4k")});
+ objects.put("{'objects':[1,1,2,3,5,8,13]}", new Object[]{1, 1, 2, 3, 5, 8, 13});
+ objects.put("{'objects':[1,1,2,3,5,8,13]}", new Object[]{1L, 1L, 2L, 3L, 5L, 8L, 13L});
+ objects.put("{'objects':[1,1,2,3,5,8]}", new Object[]{(short) 1, (short) 1, (short) 2, (short) 3, (short) 5, (short) 8});
+ objects.put("{'objects':['a','b','c']}", new Object[]{"a", "b", "c"});
+ objects.put("{'objects':['a','b','c']}", new Object[]{new Text("a"), new Text(new BytesArray("b")), new Text("c")});
+ objects.put("{'objects':null}", null);
+ objects.put("{'objects':[null,null,null]}", new Object[]{null, null, null});
+ objects.put("{'objects':['OPEN','CLOSE']}", IndexMetaData.State.values());
+ objects.put("{'objects':[{'f1':'v1'},{'f2':'v2'}]}", new Object[]{singletonMap("f1", "v1"), singletonMap("f2", "v2")});
+ objects.put("{'objects':[[1,2,3],[4,5]]}", new Object[]{Arrays.asList(1, 2, 3), Arrays.asList(4, 5)});
+
+ final String paths = Constants.WINDOWS ? "{'objects':['a\\\\b\\\\c','d\\\\e']}" : "{'objects':['a/b/c','d/e']}";
+ objects.put(paths, new Object[]{PathUtils.get("a", "b", "c"), PathUtils.get("d", "e")});
+
+ final DateTimeFormatter formatter = XContentBuilder.DEFAULT_DATE_PRINTER;
+ final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate();
+ final Date d2 = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC).toDate();
+ objects.put("{'objects':['" + formatter.print(d1.getTime()) + "','" + formatter.print(d2.getTime()) + "']}", new Object[]{d1, d2});
+
+ final DateTime dt1 = DateTime.now();
+ final DateTime dt2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC);
+ objects.put("{'objects':['" + formatter.print(dt1) + "','2016-12-25T07:59:42.213Z']}", new Object[]{dt1, dt2});
+
+ final Calendar c1 = new DateTime(2012, 7, 7, 10, 23, DateTimeZone.UTC).toCalendar(Locale.ROOT);
+ final Calendar c2 = new DateTime(2014, 11, 16, 19, 36, DateTimeZone.UTC).toCalendar(Locale.ROOT);
+ objects.put("{'objects':['2012-07-07T10:23:00.000Z','2014-11-16T19:36:00.000Z']}", new Object[]{c1, c2});
+
+ final ToXContent x1 = (builder, params) -> builder.startObject().field("f1", "v1").field("f2", 2).array("f3", 3, 4, 5).endObject();
+ final ToXContent x2 = (builder, params) -> builder.startObject().field("f1", "v1").field("f2", x1).endObject();
+ objects.put("{'objects':[{'f1':'v1','f2':2,'f3':[3,4,5]},{'f1':'v1','f2':{'f1':'v1','f2':2,'f3':[3,4,5]}}]}", new Object[]{x1, x2});
+
+ for (Map.Entry<String, Object[]> o : objects.entrySet()) {
+ final String expected = o.getKey();
+ assertResult(expected, () -> builder().startObject().field("objects", o.getValue()).endObject());
+ assertResult(expected, () -> builder().startObject().field("objects").value(o.getValue()).endObject());
+ assertResult(expected, () -> builder().startObject().field("objects").values(o.getValue()).endObject());
+ assertResult(expected, () -> builder().startObject().array("objects", o.getValue()).endObject());
+ }
+ }
+
+ public void testObject() throws Exception {
+ Map<String, Object> object = new HashMap<>();
+ object.put("{'object':false}", Boolean.FALSE);
+ object.put("{'object':13}", (byte) 13);
+ object.put("{'object':5.0}", 5.0d);
+ object.put("{'object':8.0}", 8.0f);
+ object.put("{'object':{'lat':45.759429931640625,'lon':4.8394775390625}}", GeoPoint.fromGeohash("u05kq4k"));
+ object.put("{'object':3}", 3);
+ object.put("{'object':2}", 2L);
+ object.put("{'object':1}", (short) 1);
+ object.put("{'object':'string'}", "string");
+ object.put("{'object':'a'}", new Text("a"));
+ object.put("{'object':'b'}", new Text(new BytesArray("b")));
+ object.put("{'object':null}", null);
+ object.put("{'object':'OPEN'}", IndexMetaData.State.OPEN);
+ object.put("{'object':'NM'}", DistanceUnit.NAUTICALMILES);
+ object.put("{'object':{'f1':'v1'}}", singletonMap("f1", "v1"));
+ object.put("{'object':{'f1':{'f2':'v2'}}}", singletonMap("f1", singletonMap("f2", "v2")));
+ object.put("{'object':[1,2,3]}", Arrays.asList(1, 2, 3));
+
+ final String path = Constants.WINDOWS ? "{'object':'a\\\\b\\\\c'}" : "{'object':'a/b/c'}";
+ object.put(path, PathUtils.get("a", "b", "c"));
+
+ final DateTimeFormatter formatter = XContentBuilder.DEFAULT_DATE_PRINTER;
+ final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate();
+ object.put("{'object':'" + formatter.print(d1.getTime()) + "'}", d1);
+
+ final DateTime d2 = DateTime.now();
+ object.put("{'object':'" + formatter.print(d2) + "'}", d2);
+
+ final Calendar c1 = new DateTime(2010, 1, 1, 0, 0, DateTimeZone.UTC).toCalendar(Locale.ROOT);
+ object.put("{'object':'2010-01-01T00:00:00.000Z'}", c1);
+
+ final ToXContent x1 = (builder, params) -> builder.startObject().field("f1", "v1").field("f2", 2).array("f3", 3, 4, 5).endObject();
+ final ToXContent x2 = (builder, params) -> builder.startObject().field("f1", "v1").field("f2", x1).endObject();
+ object.put("{'object':{'f1':'v1','f2':{'f1':'v1','f2':2,'f3':[3,4,5]}}}", x2);
+
+ for (Map.Entry<String, Object> o : object.entrySet()) {
+ final String expected = o.getKey();
+ assertResult(expected, () -> builder().humanReadable(true).startObject().field("object", o.getValue()).endObject());
+ assertResult(expected, () -> builder().humanReadable(true).startObject().field("object").value(o.getValue()).endObject());
+ }
+
+ assertResult("{'objects':[null,null,null]}", () -> builder().startObject().array("objects", null, null, null).endObject());
+ }
+
+ public void testToXContent() throws Exception {
+ assertResult("{'xcontent':null}", () -> builder().startObject().field("xcontent", (ToXContent) null).endObject());
+ assertResult("{'xcontent':null}", () -> builder().startObject().field("xcontent").value((ToXContent) null).endObject());
+
+ ToXContent xcontent0 = (builder, params) -> {
+ builder.startObject();
+ builder.field("field", "value");
+ builder.array("array", "1", "2", "3");
+ builder.startObject("foo");
+ builder.field("bar", "baz");
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ };
+
+ assertResult("{'field':'value','array':['1','2','3'],'foo':{'bar':'baz'}}", () -> builder().value(xcontent0));
+ assertResult("{'xcontent':{'field':'value','array':['1','2','3'],'foo':{'bar':'baz'}}}", () -> builder()
+ .startObject()
+ .field("xcontent", xcontent0)
+ .endObject());
+
+ ToXContent xcontent1 = (builder, params) -> {
+ builder.startObject();
+ builder.field("field", "value");
+ builder.startObject("foo");
+ builder.field("bar", "baz");
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ };
+
+ ToXContent xcontent2 = (builder, params) -> {
+ builder.startObject();
+ builder.field("root", xcontent0);
+ builder.array("childs", xcontent0, xcontent1);
+ builder.endObject();
+ return builder;
+ };
+ assertResult("{'root':{" +
+ "'field':'value'," +
+ "'array':['1','2','3']," +
+ "'foo':{'bar':'baz'}" +
+ "}," +
+ "'childs':[" +
+ "{'field':'value','array':['1','2','3'],'foo':{'bar':'baz'}}," +
+ "{'field':'value','foo':{'bar':'baz'}}" +
+ "]}", () -> builder().value(xcontent2));
+ }
+
+ public void testMap() throws Exception {
+ Map<String, Map<String, ?>> maps = new HashMap<>();
+ maps.put("{'map':null}", (Map) null);
+ maps.put("{'map':{}}", Collections.emptyMap());
+ maps.put("{'map':{'key':'value'}}", singletonMap("key", "value"));
+
+ Map<String, Object> innerMap = new HashMap<>();
+ innerMap.put("string", "value");
+ innerMap.put("int", 42);
+ innerMap.put("long", 42L);
+ innerMap.put("long[]", new long[]{1L, 3L});
+ innerMap.put("path", PathUtils.get("path", "to", "file"));
+ innerMap.put("object", singletonMap("key", "value"));
+
+ final String path = Constants.WINDOWS ? "path\\\\to\\\\file" : "path/to/file";
+ maps.put("{'map':{'path':'" + path + "','string':'value','long[]':[1,3],'int':42,'long':42,'object':{'key':'value'}}}", innerMap);
+
+ for (Map.Entry<String, Map<String, ?>> m : maps.entrySet()) {
+ final String expected = m.getKey();
+ assertResult(expected, () -> builder().startObject().field("map", m.getValue()).endObject());
+ assertResult(expected, () -> builder().startObject().field("map").value(m.getValue()).endObject());
+ assertResult(expected, () -> builder().startObject().field("map").map(m.getValue()).endObject());
+ }
+ }
+
+ public void testIterable() throws Exception {
+ Map<String, Iterable<?>> iterables = new HashMap<>();
+ iterables.put("{'iter':null}", (Iterable) null);
+ iterables.put("{'iter':[]}", Collections.emptyList());
+ iterables.put("{'iter':['a','b']}", Arrays.asList("a", "b"));
+
+ final String path = Constants.WINDOWS ? "{'iter':'path\\\\to\\\\file'}" : "{'iter':'path/to/file'}";
+ iterables.put(path, PathUtils.get("path", "to", "file"));
+
+ final String paths = Constants.WINDOWS ? "{'iter':['a\\\\b\\\\c','c\\\\d']}" : "{'iter':['a/b/c','c/d']}";
+ iterables.put(paths, Arrays.asList(PathUtils.get("a", "b", "c"), PathUtils.get("c", "d")));
+
+ for (Map.Entry<String, Iterable<?>> i : iterables.entrySet()) {
+ final String expected = i.getKey();
+ assertResult(expected, () -> builder().startObject().field("iter", i.getValue()).endObject());
+ assertResult(expected, () -> builder().startObject().field("iter").value(i.getValue()).endObject());
+ }
+ }
+
+ public void testUnknownObject() throws Exception {
+ Map<String, Object> objects = new HashMap<>();
+ objects.put("{'obj':50.63}", DistanceUnit.METERS.fromMeters(50.63));
+ objects.put("{'obj':'MINUTES'}", TimeUnit.MINUTES);
+ objects.put("{'obj':'class org.elasticsearch.common.xcontent.BaseXContentTestCase'}", BaseXContentTestCase.class);
+
+ for (Map.Entry<String, ?> o : objects.entrySet()) {
+ final String expected = o.getKey();
+ assertResult(expected, () -> builder().startObject().field("obj", o.getValue()).endObject());
+ assertResult(expected, () -> builder().startObject().field("obj").value(o.getValue()).endObject());
+ }
+ }
public void testBasics() throws IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream();
@@ -50,7 +702,7 @@ public abstract class BaseXContentTestCase extends ESTestCase {
generator.writeNumber(2L);
}
});
- assertEquals(e.getMessage(), "unclosed object or array found");
+ assertEquals(e.getMessage(), "Unclosed object or array found");
}
public void testMissingEndArray() throws IOException {
@@ -61,11 +713,11 @@ public abstract class BaseXContentTestCase extends ESTestCase {
generator.writeNumber(2L);
}
});
- assertEquals(e.getMessage(), "unclosed object or array found");
+ assertEquals(e.getMessage(), "Unclosed object or array found");
}
public void testRawField() throws Exception {
- for (boolean useStream : new boolean[] {false, true}) {
+ for (boolean useStream : new boolean[]{false, true}) {
for (XContentType xcontentType : XContentType.values()) {
doTestRawField(xcontentType.xContent(), useStream);
}
@@ -156,4 +808,96 @@ public abstract class BaseXContentTestCase extends ESTestCase {
assertNull(parser.nextToken());
}
+
+ protected void doTestBigInteger(JsonGenerator generator, ByteArrayOutputStream os) throws Exception {
+ // Big integers cannot be handled explicitly, but if some values happen to be big ints,
+ // we can still call parser.map() and get the bigint value so that e.g. source filtering
+ // keeps working
+ BigInteger bigInteger = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE);
+ generator.writeStartObject();
+ generator.writeFieldName("foo");
+ generator.writeString("bar");
+ generator.writeFieldName("bigint");
+ generator.writeNumber(bigInteger);
+ generator.writeEndObject();
+ generator.flush();
+ byte[] serialized = os.toByteArray();
+
+ XContentParser parser = xcontentType().xContent().createParser(serialized);
+ Map<String, Object> map = parser.map();
+ assertEquals("bar", map.get("foo"));
+ assertEquals(bigInteger, map.get("bigint"));
+ }
+
+ public void testEnsureNameNotNull() {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> XContentBuilder.ensureNameNotNull(null));
+ assertThat(e.getMessage(), containsString("Field name cannot be null"));
+ }
+
+ public void testFormatterNameNotNull() {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> XContentBuilder.ensureFormatterNotNull(null));
+ assertThat(e.getMessage(), containsString("DateTimeFormatter cannot be null"));
+ }
+
+ public void testEnsureNotNull() {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> XContentBuilder.ensureNotNull(null, "message"));
+ assertThat(e.getMessage(), containsString("message"));
+
+ XContentBuilder.ensureNotNull("foo", "No exception must be thrown");
+ }
+
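+ // the helpers below assert the exception type and message produced when a builder is misused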
+ private static void expectUnclosedException(ThrowingRunnable runnable) {
+ IllegalStateException e = expectThrows(IllegalStateException.class, runnable);
+ assertThat(e.getMessage(), containsString("Failed to close the XContentBuilder"));
+ assertThat(e.getCause(), allOf(notNullValue(), instanceOf(IOException.class)));
+ assertThat(e.getCause().getMessage(), containsString("Unclosed object or array found"));
+ }
+
+ private static void expectValueException(ThrowingRunnable runnable) {
+ JsonGenerationException e = expectThrows(JsonGenerationException.class, runnable);
+ assertThat(e.getMessage(), containsString("expecting a value"));
+ }
+
+ private static void expectFieldException(ThrowingRunnable runnable) {
+ JsonGenerationException e = expectThrows(JsonGenerationException.class, runnable);
+ assertThat(e.getMessage(), containsString("expecting field name"));
+ }
+
+ private static void expectNonNullFieldException(ThrowingRunnable runnable) {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, runnable);
+ assertThat(e.getMessage(), containsString("Field name cannot be null"));
+ }
+
+ private static void expectNonNullFormatterException(ThrowingRunnable runnable) {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, runnable);
+ assertThat(e.getMessage(), containsString("DateTimeFormatter cannot be null"));
+ }
+
+ private static void expectObjectException(ThrowingRunnable runnable) {
+ JsonGenerationException e = expectThrows(JsonGenerationException.class, runnable);
+ assertThat(e.getMessage(), containsString("Current context not Object"));
+ }
+
+ private static void expectArrayException(ThrowingRunnable runnable) {
+ JsonGenerationException e = expectThrows(JsonGenerationException.class, runnable);
+ assertThat(e.getMessage(), containsString("Current context not Array"));
+ }
+
+ public static Matcher<String> equalToJson(String json) {
+ return Matchers.equalTo(json.replace("'", "\""));
+ }
+
+ private static void assertResult(String expected, Builder builder) throws IOException {
+ // Build the XContentBuilder, convert its bytes to JSON and check it matches
+ assertThat(XContentHelper.convertToJson(builder.build().bytes(), randomBoolean()), equalToJson(expected));
+ }
+
+ private static byte[] randomBytes() throws Exception {
+ return randomUnicodeOfLength(scaledRandomIntBetween(10, 1000)).getBytes("UTF-8");
+ }
+
+ @FunctionalInterface
+ private interface Builder {
+ XContentBuilder build() throws IOException;
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java
index bef4a047ef..cad712951d 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java
@@ -25,6 +25,8 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.AbstractObjectParser.ContextParser;
+import org.elasticsearch.common.xcontent.AbstractObjectParser.NoContextParser;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matcher;
@@ -43,6 +45,27 @@ import static org.hamcrest.Matchers.nullValue;
public class ConstructingObjectParserTests extends ESTestCase {
private static final ParseFieldMatcherSupplier MATCHER = () -> ParseFieldMatcher.STRICT;
+ public void testNullDeclares() {
+ ConstructingObjectParser<Void, ParseFieldMatcherSupplier> objectParser = new ConstructingObjectParser<>("foo", a -> null);
+ Exception e = expectThrows(IllegalArgumentException.class,
+ () -> objectParser.declareField(null, (r, c) -> null, new ParseField("test"), ObjectParser.ValueType.STRING));
+ assertEquals("[consumer] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (ContextParser<ParseFieldMatcherSupplier, Object>) null,
+ new ParseField("test"), ObjectParser.ValueType.STRING));
+ assertEquals("[parser] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (NoContextParser<Object>) null,
+ new ParseField("test"), ObjectParser.ValueType.STRING));
+ assertEquals("[parser] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (r, c) -> null, null, ObjectParser.ValueType.STRING));
+ assertEquals("[parseField] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (r, c) -> null, new ParseField("test"), null));
+ assertEquals("[type] is required", e.getMessage());
+ }
+
/**
* Builds the object in random order and parses it.
*/
@@ -261,6 +284,25 @@ public class ConstructingObjectParserTests extends ESTestCase {
assertTrue(result.fooSet);
}
+ public void testIgnoreUnknownFields() throws IOException {
+ XContentParser parser = XContentType.JSON.xContent().createParser(
+ "{\n"
+ + " \"test\" : \"foo\",\n"
+ + " \"junk\" : 2\n"
+ + "}");
+ class TestStruct {
+ public final String test;
+ public TestStruct(String test) {
+ this.test = test;
+ }
+ }
+ ConstructingObjectParser<TestStruct, ParseFieldMatcherSupplier> objectParser = new ConstructingObjectParser<>("foo", true, a ->
+ new TestStruct((String) a[0]));
+ objectParser.declareString(constructorArg(), new ParseField("test"));
+ TestStruct s = objectParser.apply(parser, MATCHER);
+ assertEquals("foo", s.test);
+ }
+
private static class HasCtorArguments implements ToXContent {
@Nullable
final String animal;
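A condensed sketch of the lenient mode exercised by testIgnoreUnknownFields above (TestStruct is the test's local class): the new boolean constructor argument tells the parser to skip any field it has no declaration for.

    // true = ignore unknown fields such as "junk" instead of throwing.
    ConstructingObjectParser<TestStruct, ParseFieldMatcherSupplier> lenientParser =
            new ConstructingObjectParser<>("foo", true, a -> new TestStruct((String) a[0]));
    lenientParser.declareString(constructorArg(), new ParseField("test"));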
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java
index a8d26e87ec..2cc4889be9 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java
@@ -18,21 +18,23 @@
*/
package org.elasticsearch.common.xcontent;
-import static org.hamcrest.Matchers.hasSize;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.xcontent.AbstractObjectParser.ContextParser;
+import org.elasticsearch.common.xcontent.AbstractObjectParser.NoContextParser;
import org.elasticsearch.common.xcontent.ObjectParser.NamedObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.test.ESTestCase;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.Matchers.hasSize;
+
public class ObjectParserTests extends ESTestCase {
private static final ParseFieldMatcherSupplier STRICT_PARSING = () -> ParseFieldMatcher.STRICT;
@@ -72,6 +74,27 @@ public class ObjectParserTests extends ESTestCase {
+ "FieldParser{preferred_name=test_number, supportedTokens=[VALUE_STRING, VALUE_NUMBER], type=INT}]}");
}
+ public void testNullDeclares() {
+ ObjectParser<Void, ParseFieldMatcherSupplier> objectParser = new ObjectParser<>("foo");
+ Exception e = expectThrows(IllegalArgumentException.class,
+ () -> objectParser.declareField(null, (r, c) -> null, new ParseField("test"), ObjectParser.ValueType.STRING));
+ assertEquals("[consumer] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (ContextParser<ParseFieldMatcherSupplier, Object>) null,
+ new ParseField("test"), ObjectParser.ValueType.STRING));
+ assertEquals("[parser] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (NoContextParser<Object>) null,
+ new ParseField("test"), ObjectParser.ValueType.STRING));
+ assertEquals("[parser] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (r, c) -> null, null, ObjectParser.ValueType.STRING));
+ assertEquals("[parseField] is required", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> objectParser.declareField(
+ (o, v) -> {}, (r, c) -> null, new ParseField("test"), null));
+ assertEquals("[type] is required", e.getMessage());
+ }
+
public void testObjectOrDefault() throws IOException {
XContentParser parser = XContentType.JSON.xContent().createParser("{\"object\" : { \"test\": 2}}");
ObjectParser<StaticTestStruct, ParseFieldMatcherSupplier> objectParser = new ObjectParser<>("foo", StaticTestStruct::new);
@@ -440,6 +463,77 @@ public class ObjectParserTests extends ESTestCase {
assertEquals("[named] doesn't support arrays. Use a single object with multiple fields.", e.getCause().getMessage());
}
+ public void testIgnoreUnknownFields() throws IOException {
+ XContentBuilder b = XContentBuilder.builder(XContentType.JSON.xContent());
+ b.startObject();
+ {
+ b.field("test", "foo");
+ b.field("junk", 2);
+ }
+ b.endObject();
+ b = shuffleXContent(b);
+ XContentParser parser = XContentType.JSON.xContent().createParser(b.bytes());
+
+ class TestStruct {
+ public String test;
+ }
+ ObjectParser<TestStruct, ParseFieldMatcherSupplier> objectParser = new ObjectParser<>("foo", true, null);
+ objectParser.declareField((i, c, x) -> c.test = i.text(), new ParseField("test"), ObjectParser.ValueType.STRING);
+ TestStruct s = objectParser.parse(parser, new TestStruct(), STRICT_PARSING);
+ assertEquals("foo", s.test);
+ }
+
+ public void testIgnoreUnknownObjects() throws IOException {
+ XContentBuilder b = XContentBuilder.builder(XContentType.JSON.xContent());
+ b.startObject();
+ {
+ b.field("test", "foo");
+ b.startObject("junk");
+ {
+ b.field("really", "junk");
+ }
+ b.endObject();
+ }
+ b.endObject();
+ b = shuffleXContent(b);
+ XContentParser parser = XContentType.JSON.xContent().createParser(b.bytes());
+
+ class TestStruct {
+ public String test;
+ }
+ ObjectParser<TestStruct, ParseFieldMatcherSupplier> objectParser = new ObjectParser<>("foo", true, null);
+ objectParser.declareField((i, c, x) -> c.test = i.text(), new ParseField("test"), ObjectParser.ValueType.STRING);
+ TestStruct s = objectParser.parse(parser, new TestStruct(), STRICT_PARSING);
+ assertEquals("foo", s.test);
+ }
+
+ public void testIgnoreUnknownArrays() throws IOException {
+ XContentBuilder b = XContentBuilder.builder(XContentType.JSON.xContent());
+ b.startObject();
+ {
+ b.field("test", "foo");
+ b.startArray("junk");
+ {
+ b.startObject();
+ {
+ b.field("really", "junk");
+ }
+ b.endObject();
+ }
+ b.endArray();
+ }
+ b.endObject();
+ b = shuffleXContent(b);
+ XContentParser parser = XContentType.JSON.xContent().createParser(b.bytes());
+ class TestStruct {
+ public String test;
+ }
+ ObjectParser<TestStruct, ParseFieldMatcherSupplier> objectParser = new ObjectParser<>("foo", true, null);
+ objectParser.declareField((i, c, x) -> c.test = i.text(), new ParseField("test"), ObjectParser.ValueType.STRING);
+ TestStruct s = objectParser.parse(parser, new TestStruct(), STRICT_PARSING);
+ assertEquals("foo", s.test);
+ }
+
static class NamedObjectHolder {
public static final ObjectParser<NamedObjectHolder, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>("named_object_holder",
NamedObjectHolder::new);
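ObjectParser gains the same lenient flag; all three testIgnoreUnknown* tests above reduce to this pattern (TestStruct is again the tests' local class):

    // The boolean in the three-argument constructor makes the parser skip
    // unknown fields, objects, and arrays instead of failing the parse.
    ObjectParser<TestStruct, ParseFieldMatcherSupplier> lenientParser = new ObjectParser<>("foo", true, null);
    lenientParser.declareField((p, c, x) -> c.test = p.text(), new ParseField("test"), ObjectParser.ValueType.STRING);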
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
index d0e095e8c6..f0b1b7f5e1 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
@@ -173,9 +173,9 @@ public class XContentBuilderTests extends ESTestCase {
public void testDateTypesConversion() throws Exception {
Date date = new Date();
- String expectedDate = XContentBuilder.defaultDatePrinter.print(date.getTime());
+ String expectedDate = XContentBuilder.DEFAULT_DATE_PRINTER.print(date.getTime());
Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT);
- String expectedCalendar = XContentBuilder.defaultDatePrinter.print(calendar.getTimeInMillis());
+ String expectedCalendar = XContentBuilder.DEFAULT_DATE_PRINTER.print(calendar.getTimeInMillis());
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.startObject().field("date", date).endObject();
assertThat(builder.string(), equalTo("{\"date\":\"" + expectedDate + "\"}"));
@@ -339,17 +339,17 @@ public class XContentBuilderTests extends ESTestCase {
builder.map(Collections.singletonMap(null, "test"));
fail("write map should have failed");
} catch(IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("field name cannot be null"));
+ assertThat(e.getMessage(), equalTo("Field name cannot be null"));
}
}
public void testWriteMapValueWithNullKeys() throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
try {
- builder.value(Collections.singletonMap(null, "test"));
+ builder.map(Collections.singletonMap(null, "test"));
fail("write map should have failed");
} catch(IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("field name cannot be null"));
+ assertThat(e.getMessage(), equalTo("Field name cannot be null"));
}
}
@@ -360,7 +360,7 @@ public class XContentBuilderTests extends ESTestCase {
builder.field("map", Collections.singletonMap(null, "test"));
fail("write map should have failed");
} catch(IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("field name cannot be null"));
+ assertThat(e.getMessage(), equalTo("Field name cannot be null"));
}
}
@@ -371,8 +371,8 @@ public class XContentBuilderTests extends ESTestCase {
builder.field("foo", true);
}
});
- assertThat(e.getMessage(), equalTo("failed to close the XContentBuilder"));
- assertThat(e.getCause().getMessage(), equalTo("unclosed object or array found"));
+ assertThat(e.getMessage(), equalTo("Failed to close the XContentBuilder"));
+ assertThat(e.getCause().getMessage(), equalTo("Unclosed object or array found"));
}
public void testMissingEndArray() throws IOException {
@@ -384,7 +384,7 @@ public class XContentBuilderTests extends ESTestCase {
builder.value(1);
}
});
- assertThat(e.getMessage(), equalTo("failed to close the XContentBuilder"));
- assertThat(e.getCause().getMessage(), equalTo("unclosed object or array found"));
+ assertThat(e.getMessage(), equalTo("Failed to close the XContentBuilder"));
+ assertThat(e.getCause().getMessage(), equalTo("Unclosed object or array found"));
}
}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java
index 928b8a6a5a..4b2e7a9695 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentTests.java
@@ -19,9 +19,14 @@
package org.elasticsearch.common.xcontent.cbor;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
+
import org.elasticsearch.common.xcontent.BaseXContentTestCase;
import org.elasticsearch.common.xcontent.XContentType;
+import java.io.ByteArrayOutputStream;
+
public class CborXContentTests extends BaseXContentTestCase {
@Override
@@ -29,4 +34,9 @@ public class CborXContentTests extends BaseXContentTestCase {
return XContentType.CBOR;
}
+ public void testBigInteger() throws Exception {
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ JsonGenerator generator = new CBORFactory().createGenerator(os);
+ doTestBigInteger(generator, os);
+ }
}
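The same testBigInteger hook recurs in the JSON, Smile, and YAML suites below; each builds a format-specific Jackson generator over a ByteArrayOutputStream and delegates to doTestBigInteger in BaseXContentTestCase (not shown in this excerpt). A rough sketch of the idea, assuming the helper round-trips a BigInteger through the generator:

    // Hypothetical sketch of the shared helper's core: write a BigInteger and
    // close the generator so the bytes in 'os' can be parsed back and checked.
    BigInteger value = new BigInteger("1695000000000000000000");
    generator.writeStartObject();
    generator.writeFieldName("bigint");
    generator.writeNumber(value);
    generator.writeEndObject();
    generator.close();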
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java
index efbca114aa..fb726b97e3 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java
@@ -48,8 +48,10 @@ public class JsonVsCborTests extends ESTestCase {
xsonGen.writeStringField("test", "value");
jsonGen.writeStringField("test", "value");
- xsonGen.writeArrayFieldStart("arr");
- jsonGen.writeArrayFieldStart("arr");
+ xsonGen.writeFieldName("arr");
+ xsonGen.writeStartArray();
+ jsonGen.writeFieldName("arr");
+ jsonGen.writeStartArray();
xsonGen.writeNumber(1);
jsonGen.writeNumber(1);
xsonGen.writeNull();
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
index 8a739eef4b..4a79ddb4ec 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java
@@ -19,9 +19,14 @@
package org.elasticsearch.common.xcontent.json;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+
import org.elasticsearch.common.xcontent.BaseXContentTestCase;
import org.elasticsearch.common.xcontent.XContentType;
+import java.io.ByteArrayOutputStream;
+
public class JsonXContentTests extends BaseXContentTestCase {
@Override
@@ -29,4 +34,9 @@ public class JsonXContentTests extends BaseXContentTestCase {
return XContentType.JSON;
}
+ public void testBigInteger() throws Exception {
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ JsonGenerator generator = new JsonFactory().createGenerator(os);
+ doTestBigInteger(generator, os);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
index 63b19a6382..ecf49be662 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
@@ -48,8 +48,10 @@ public class JsonVsSmileTests extends ESTestCase {
xsonGen.writeStringField("test", "value");
jsonGen.writeStringField("test", "value");
- xsonGen.writeArrayFieldStart("arr");
- jsonGen.writeArrayFieldStart("arr");
+ xsonGen.writeFieldName("arr");
+ xsonGen.writeStartArray();
+ jsonGen.writeFieldName("arr");
+ jsonGen.writeStartArray();
xsonGen.writeNumber(1);
jsonGen.writeNumber(1);
xsonGen.writeNull();
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java
index 6961e84416..71f64ab502 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/smile/SmileXContentTests.java
@@ -19,9 +19,14 @@
package org.elasticsearch.common.xcontent.smile;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.smile.SmileFactory;
+
import org.elasticsearch.common.xcontent.BaseXContentTestCase;
import org.elasticsearch.common.xcontent.XContentType;
+import java.io.ByteArrayOutputStream;
+
public class SmileXContentTests extends BaseXContentTestCase {
@Override
@@ -29,4 +34,9 @@ public class SmileXContentTests extends BaseXContentTestCase {
return XContentType.SMILE;
}
+ public void testBigInteger() throws Exception {
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ JsonGenerator generator = new SmileFactory().createGenerator(os);
+ doTestBigInteger(generator, os);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
index 1c4ff9874a..ba2043bbe2 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
@@ -139,7 +139,7 @@ public class XContentMapValuesTests extends ESTestCase {
// lists
builder = XContentFactory.jsonBuilder().startObject()
- .startObject("path1").field("test", "value1", "value2").endObject()
+ .startObject("path1").array("test", "value1", "value2").endObject()
.endObject();
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.string())) {
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTestCase.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTestCase.java
index b8b38a543f..f7ffcac32b 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTestCase.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTestCase.java
@@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.support.filtering;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -28,7 +29,11 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
+import java.util.Set;
+import java.util.function.Function;
+import static java.util.Collections.emptySet;
+import static java.util.Collections.singleton;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
@@ -86,12 +91,16 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
return XContentBuilder.builder(getXContentType().xContent());
}
- private XContentBuilder newXContentBuilder(String filter, boolean inclusive) throws IOException {
- return XContentBuilder.builder(getXContentType().xContent(), new String[] { filter }, inclusive);
+ private XContentBuilder newXContentBuilderWithIncludes(String filter) throws IOException {
+ return newXContentBuilder(singleton(filter), emptySet());
}
- private XContentBuilder newXContentBuilder(String[] filters, boolean inclusive) throws IOException {
- return XContentBuilder.builder(getXContentType().xContent(), filters, inclusive);
+ private XContentBuilder newXContentBuilderWithExcludes(String filter) throws IOException {
+ return newXContentBuilder(emptySet(), singleton(filter));
+ }
+
+ private XContentBuilder newXContentBuilder(Set<String> includes, Set<String> excludes) throws IOException {
+ return XContentBuilder.builder(getXContentType().xContent(), includes, excludes);
}
/**
@@ -173,20 +182,22 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
return builder;
}
- /**
- * Instanciates a new XContentBuilder with the given filters and builds a
- * sample with it.
- * @param inclusive
- * Specifies if filters are inclusive or exclusive
- */
- private XContentBuilder sample(String filter, boolean inclusive) throws IOException {
- return sample(newXContentBuilder(filter, inclusive));
+ /** Create a new {@link XContentBuilder} and use it to build the sample using the given inclusive filter **/
+ private XContentBuilder sampleWithIncludes(String filter) throws IOException {
+ return sample(newXContentBuilderWithIncludes(filter));
}
- private XContentBuilder sample(String[] filters, boolean inclusive) throws IOException {
- return sample(newXContentBuilder(filters, inclusive));
+ /** Create a new {@link XContentBuilder} and use it to build the sample using the given exclusive filter **/
+ private XContentBuilder sampleWithExcludes(String filter) throws IOException {
+ return sample(newXContentBuilderWithExcludes(filter));
}
+ /** Create a new {@link XContentBuilder} and use it to build the sample using the given inclusive and exclusive filters **/
+ private XContentBuilder sampleWithFilters(Set<String> includes, Set<String> excludes) throws IOException {
+ return sample(newXContentBuilder(includes, excludes));
+ }
+
+ /** Create a new {@link XContentBuilder} and use it to build the sample **/
private XContentBuilder sample() throws IOException {
return sample(newXContentBuilder());
}
@@ -195,23 +206,23 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
XContentBuilder expected = sample();
assertXContentBuilder(expected, sample());
- assertXContentBuilder(expected, sample("*", true));
- assertXContentBuilder(expected, sample("**", true));
- assertXContentBuilder(expected, sample("xyz", false));
+ assertXContentBuilder(expected, sampleWithIncludes("*"));
+ assertXContentBuilder(expected, sampleWithIncludes("**"));
+ assertXContentBuilder(expected, sampleWithExcludes("xyz"));
}
public void testNoMatch() throws Exception {
XContentBuilder expected = newXContentBuilder().startObject().endObject();
- assertXContentBuilder(expected, sample("xyz", true));
- assertXContentBuilder(expected, sample("*", false));
- assertXContentBuilder(expected, sample("**", false));
+ assertXContentBuilder(expected, sampleWithIncludes("xyz"));
+ assertXContentBuilder(expected, sampleWithExcludes("*"));
+ assertXContentBuilder(expected, sampleWithExcludes("**"));
}
public void testSimpleFieldInclusive() throws Exception {
XContentBuilder expected = newXContentBuilder().startObject().field("title", "My awesome book").endObject();
- assertXContentBuilder(expected, sample("title", true));
+ assertXContentBuilder(expected, sampleWithIncludes("title"));
}
public void testSimpleFieldExclusive() throws Exception {
@@ -286,10 +297,9 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("title", false));
+ assertXContentBuilder(expected, sampleWithExcludes("title"));
}
-
public void testSimpleFieldWithWildcardInclusive() throws Exception {
XContentBuilder expected = newXContentBuilder().startObject()
.field("price", 27.99)
@@ -343,7 +353,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("pr*", true));
+ assertXContentBuilder(expected, sampleWithIncludes("pr*"));
}
public void testSimpleFieldWithWildcardExclusive() throws Exception {
@@ -370,7 +380,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endArray()
.endObject();
- assertXContentBuilder(expected, sample("pr*", false));
+ assertXContentBuilder(expected, sampleWithExcludes("pr*"));
}
public void testMultipleFieldsInclusive() throws Exception {
@@ -379,7 +389,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.field("pages", 456)
.endObject();
- assertXContentBuilder(expected, sample(new String[] { "title", "pages" }, true));
+ assertXContentBuilder(expected, sampleWithFilters(Sets.newHashSet("title", "pages"), emptySet()));
}
public void testMultipleFieldsExclusive() throws Exception {
@@ -453,10 +463,9 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample(new String[] { "title", "pages" }, false));
+ assertXContentBuilder(expected, sample(newXContentBuilder(emptySet(), Sets.newHashSet("title", "pages"))));
}
-
public void testSimpleArrayInclusive() throws Exception {
XContentBuilder expected = newXContentBuilder().startObject()
.startArray("tags")
@@ -465,7 +474,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endArray()
.endObject();
- assertXContentBuilder(expected, sample("tags", true));
+ assertXContentBuilder(expected, sampleWithIncludes("tags"));
}
public void testSimpleArrayExclusive() throws Exception {
@@ -537,10 +546,9 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("tags", false));
+ assertXContentBuilder(expected, sampleWithExcludes("tags"));
}
-
public void testSimpleArrayOfObjectsInclusive() throws Exception {
XContentBuilder expected = newXContentBuilder().startObject()
.startArray("authors")
@@ -557,9 +565,9 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endArray()
.endObject();
- assertXContentBuilder(expected, sample("authors", true));
- assertXContentBuilder(expected, sample("authors.*", true));
- assertXContentBuilder(expected, sample("authors.*name", true));
+ assertXContentBuilder(expected, sampleWithIncludes("authors"));
+ assertXContentBuilder(expected, sampleWithIncludes("authors.*"));
+ assertXContentBuilder(expected, sampleWithIncludes("authors.*name"));
}
public void testSimpleArrayOfObjectsExclusive() throws Exception {
@@ -623,9 +631,9 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("authors", false));
- assertXContentBuilder(expected, sample("authors.*", false));
- assertXContentBuilder(expected, sample("authors.*name", false));
+ assertXContentBuilder(expected, sampleWithExcludes("authors"));
+ assertXContentBuilder(expected, sampleWithExcludes("authors.*"));
+ assertXContentBuilder(expected, sampleWithExcludes("authors.*name"));
}
public void testSimpleArrayOfObjectsPropertyInclusive() throws Exception {
@@ -640,8 +648,8 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endArray()
.endObject();
- assertXContentBuilder(expected, sample("authors.lastname", true));
- assertXContentBuilder(expected, sample("authors.l*", true));
+ assertXContentBuilder(expected, sampleWithIncludes("authors.lastname"));
+ assertXContentBuilder(expected, sampleWithIncludes("authors.l*"));
}
public void testSimpleArrayOfObjectsPropertyExclusive() throws Exception {
@@ -715,8 +723,8 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("authors.lastname", false));
- assertXContentBuilder(expected, sample("authors.l*", false));
+ assertXContentBuilder(expected, sampleWithExcludes("authors.lastname"));
+ assertXContentBuilder(expected, sampleWithExcludes("authors.l*"));
}
public void testRecurseField1Inclusive() throws Exception {
@@ -768,7 +776,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("**.name", true));
+ assertXContentBuilder(expected, sampleWithIncludes("**.name"));
}
public void testRecurseField1Exclusive() throws Exception {
@@ -831,7 +839,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("**.name", false));
+ assertXContentBuilder(expected, sampleWithExcludes("**.name"));
}
public void testRecurseField2Inclusive() throws Exception {
@@ -875,7 +883,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("properties.**.name", true));
+ assertXContentBuilder(expected, sampleWithIncludes("properties.**.name"));
}
public void testRecurseField2Exclusive() throws Exception {
@@ -940,10 +948,9 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("properties.**.name", false));
+ assertXContentBuilder(expected, sampleWithExcludes("properties.**.name"));
}
-
public void testRecurseField3Inclusive() throws Exception {
XContentBuilder expected = newXContentBuilder().startObject()
.startObject("properties")
@@ -970,7 +977,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("properties.*.en.**.name", true));
+ assertXContentBuilder(expected, sampleWithIncludes("properties.*.en.**.name"));
}
public void testRecurseField3Exclusive() throws Exception {
@@ -1040,10 +1047,9 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("properties.*.en.**.name", false));
+ assertXContentBuilder(expected, sampleWithExcludes("properties.*.en.**.name"));
}
-
public void testRecurseField4Inclusive() throws Exception {
XContentBuilder expected = newXContentBuilder().startObject()
.startObject("properties")
@@ -1072,7 +1078,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("properties.**.distributors.name", true));
+ assertXContentBuilder(expected, sampleWithIncludes("properties.**.distributors.name"));
}
public void testRecurseField4Exclusive() throws Exception {
@@ -1140,7 +1146,7 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
.endObject()
.endObject();
- assertXContentBuilder(expected, sample("properties.**.distributors.name", false));
+ assertXContentBuilder(expected, sampleWithExcludes("properties.**.distributors.name"));
}
public void testRawField() throws Exception {
@@ -1155,24 +1161,24 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
// Test method: rawField(String fieldName, BytesReference content)
assertXContentBuilder(expectedRawField, newXContentBuilder().startObject().field("foo", 0).rawField("raw", raw).endObject());
assertXContentBuilder(expectedRawFieldFiltered,
- newXContentBuilder("f*", true).startObject().field("foo", 0).rawField("raw", raw).endObject());
+ newXContentBuilderWithIncludes("f*").startObject().field("foo", 0).rawField("raw", raw).endObject());
assertXContentBuilder(expectedRawFieldFiltered,
- newXContentBuilder("r*", false).startObject().field("foo", 0).rawField("raw", raw).endObject());
+ newXContentBuilderWithExcludes("r*").startObject().field("foo", 0).rawField("raw", raw).endObject());
assertXContentBuilder(expectedRawFieldNotFiltered,
- newXContentBuilder("r*", true).startObject().field("foo", 0).rawField("raw", raw).endObject());
+ newXContentBuilderWithIncludes("r*").startObject().field("foo", 0).rawField("raw", raw).endObject());
assertXContentBuilder(expectedRawFieldNotFiltered,
- newXContentBuilder("f*", false).startObject().field("foo", 0).rawField("raw", raw).endObject());
+ newXContentBuilderWithExcludes("f*").startObject().field("foo", 0).rawField("raw", raw).endObject());
// Test method: rawField(String fieldName, InputStream content)
assertXContentBuilder(expectedRawField,
newXContentBuilder().startObject().field("foo", 0).rawField("raw", raw.streamInput()).endObject());
- assertXContentBuilder(expectedRawFieldFiltered, newXContentBuilder("f*", true).startObject().field("foo", 0)
+ assertXContentBuilder(expectedRawFieldFiltered, newXContentBuilderWithIncludes("f*").startObject().field("foo", 0)
.rawField("raw", raw.streamInput()).endObject());
- assertXContentBuilder(expectedRawFieldFiltered, newXContentBuilder("r*", false).startObject().field("foo", 0)
+ assertXContentBuilder(expectedRawFieldFiltered, newXContentBuilderWithExcludes("r*").startObject().field("foo", 0)
.rawField("raw", raw.streamInput()).endObject());
- assertXContentBuilder(expectedRawFieldNotFiltered, newXContentBuilder("r*", true).startObject().field("foo", 0)
+ assertXContentBuilder(expectedRawFieldNotFiltered, newXContentBuilderWithIncludes("r*").startObject().field("foo", 0)
.rawField("raw", raw.streamInput()).endObject());
- assertXContentBuilder(expectedRawFieldNotFiltered, newXContentBuilder("f*", false).startObject().field("foo", 0)
+ assertXContentBuilder(expectedRawFieldNotFiltered, newXContentBuilderWithExcludes("f*").startObject().field("foo", 0)
.rawField("raw", raw.streamInput()).endObject());
}
@@ -1180,48 +1186,209 @@ public abstract class AbstractFilteringJsonGeneratorTestCase extends ESTestCase
// Test: Array of values (no filtering)
XContentBuilder expected = newXContentBuilder().startObject().startArray("tags").value("lorem").value("ipsum").value("dolor")
.endArray().endObject();
- assertXContentBuilder(expected, newXContentBuilder("t*", true).startObject().startArray("tags").value("lorem").value("ipsum")
- .value("dolor").endArray().endObject());
- assertXContentBuilder(expected, newXContentBuilder("tags", true).startObject().startArray("tags").value("lorem").value("ipsum")
+ assertXContentBuilder(expected, newXContentBuilderWithIncludes("t*").startObject().startArray("tags").value("lorem").value("ipsum")
.value("dolor").endArray().endObject());
- assertXContentBuilder(expected, newXContentBuilder("a", false).startObject().startArray("tags").value("lorem").value("ipsum")
+ assertXContentBuilder(expected, newXContentBuilderWithIncludes("tags").startObject().startArray("tags").value("lorem")
+ .value("ipsum").value("dolor").endArray().endObject());
+ assertXContentBuilder(expected, newXContentBuilderWithExcludes("a").startObject().startArray("tags").value("lorem").value("ipsum")
.value("dolor").endArray().endObject());
// Test: Array of values (with filtering)
- assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilder("foo", true).startObject()
+ assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilderWithIncludes("foo").startObject()
.startArray("tags").value("lorem").value("ipsum").value("dolor").endArray().endObject());
- assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilder("t*", false).startObject()
+ assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilderWithExcludes("t*").startObject()
.startArray("tags").value("lorem").value("ipsum").value("dolor").endArray().endObject());
- assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilder("tags", false).startObject()
+ assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilderWithExcludes("tags").startObject()
.startArray("tags").value("lorem").value("ipsum").value("dolor").endArray().endObject());
// Test: Array of objects (no filtering)
expected = newXContentBuilder().startObject().startArray("tags").startObject().field("lastname", "lorem").endObject().startObject()
.field("firstname", "ipsum").endObject().endArray().endObject();
- assertXContentBuilder(expected, newXContentBuilder("t*", true).startObject().startArray("tags").startObject()
+ assertXContentBuilder(expected, newXContentBuilderWithIncludes("t*").startObject().startArray("tags").startObject()
.field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
- assertXContentBuilder(expected, newXContentBuilder("tags", true).startObject().startArray("tags").startObject()
+ assertXContentBuilder(expected, newXContentBuilderWithIncludes("tags").startObject().startArray("tags").startObject()
.field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
- assertXContentBuilder(expected, newXContentBuilder("a", false).startObject().startArray("tags").startObject()
+ assertXContentBuilder(expected, newXContentBuilderWithExcludes("a").startObject().startArray("tags").startObject()
.field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
// Test: Array of objects (with filtering)
assertXContentBuilder(newXContentBuilder().startObject().endObject(),
- newXContentBuilder("foo", true).startObject().startArray("tags").startObject().field("lastname", "lorem").endObject()
+ newXContentBuilderWithIncludes("foo").startObject().startArray("tags").startObject().field("lastname", "lorem").endObject()
.startObject().field("firstname", "ipsum").endObject().endArray().endObject());
assertXContentBuilder(newXContentBuilder().startObject().endObject(),
- newXContentBuilder("t*", false).startObject().startArray("tags").startObject().field("lastname", "lorem").endObject()
+ newXContentBuilderWithExcludes("t*").startObject().startArray("tags").startObject().field("lastname", "lorem").endObject()
.startObject().field("firstname", "ipsum").endObject().endArray().endObject());
assertXContentBuilder(newXContentBuilder().startObject().endObject(),
- newXContentBuilder("tags", false).startObject().startArray("tags").startObject().field("lastname", "lorem").endObject()
+ newXContentBuilderWithExcludes("tags").startObject().startArray("tags").startObject().field("lastname", "lorem").endObject()
.startObject().field("firstname", "ipsum").endObject().endArray().endObject());
// Test: Array of objects (with partial filtering)
expected = newXContentBuilder().startObject().startArray("tags").startObject().field("firstname", "ipsum").endObject().endArray()
.endObject();
- assertXContentBuilder(expected, newXContentBuilder("t*.firstname", true).startObject().startArray("tags").startObject()
+ assertXContentBuilder(expected, newXContentBuilderWithIncludes("t*.firstname").startObject().startArray("tags").startObject()
.field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
- assertXContentBuilder(expected, newXContentBuilder("t*.lastname", false).startObject().startArray("tags").startObject()
+ assertXContentBuilder(expected, newXContentBuilderWithExcludes("t*.lastname").startObject().startArray("tags").startObject()
.field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
}
+
+ public void testEmptyObject() throws IOException {
+ final Function<XContentBuilder, XContentBuilder> build = builder -> {
+ try {
+ return builder.startObject().startObject("foo").endObject().endObject();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ };
+
+ XContentBuilder expected = build.apply(newXContentBuilder());
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithIncludes("foo")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithExcludes("bar")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilder(singleton("f*"), singleton("baz"))));
+
+ expected = newXContentBuilder().startObject().endObject();
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithExcludes("foo")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithIncludes("bar")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilder(singleton("f*"), singleton("foo"))));
+ }
+
+ public void testSingleFieldObject() throws IOException {
+ final Function<XContentBuilder, XContentBuilder> build = builder -> {
+ try {
+ return builder.startObject().startObject("foo").field("bar", "test").endObject().endObject();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ };
+
+ XContentBuilder expected = build.apply(newXContentBuilder());
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithIncludes("foo.bar")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithExcludes("foo.baz")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilder(singleton("foo"), singleton("foo.baz"))));
+
+ expected = newXContentBuilder().startObject().endObject();
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithExcludes("foo.bar")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilder(singleton("foo"), singleton("foo.b*"))));
+ }
+
+ public void testSingleFieldWithBothExcludesIncludes() throws IOException {
+ XContentBuilder expected = newXContentBuilder()
+ .startObject()
+ .field("pages", 456)
+ .field("price", 27.99)
+ .endObject();
+
+ assertXContentBuilder(expected, sampleWithFilters(singleton("p*"), singleton("properties")));
+ }
+
+ public void testObjectsInArrayWithBothExcludesIncludes() throws IOException {
+ Set<String> includes = Sets.newHashSet("tags", "authors");
+ Set<String> excludes = singleton("authors.name");
+
+ XContentBuilder expected = newXContentBuilder()
+ .startObject()
+ .startArray("tags")
+ .value("elasticsearch")
+ .value("java")
+ .endArray()
+ .startArray("authors")
+ .startObject()
+ .field("lastname", "John")
+ .field("firstname", "Doe")
+ .endObject()
+ .startObject()
+ .field("lastname", "William")
+ .field("firstname", "Smith")
+ .endObject()
+ .endArray()
+ .endObject();
+
+ assertXContentBuilder(expected, sampleWithFilters(includes, excludes));
+ }
+
+ public void testRecursiveObjectsInArrayWithBothExcludesIncludes() throws IOException {
+ Set<String> includes = Sets.newHashSet("**.language", "properties.weight");
+ Set<String> excludes = singleton("**.distributors");
+
+ XContentBuilder expected = newXContentBuilder()
+ .startObject()
+ .startObject("properties")
+ .field("weight", 0.8d)
+ .startObject("language")
+ .startObject("en")
+ .field("lang", "English")
+ .field("available", true)
+ .endObject()
+ .startObject("fr")
+ .field("lang", "French")
+ .field("available", false)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertXContentBuilder(expected, sampleWithFilters(includes, excludes));
+ }
+
+ public void testRecursiveSameObjectWithBothExcludesIncludes() throws IOException {
+ Set<String> includes = singleton("**.distributors");
+ Set<String> excludes = singleton("**.distributors");
+
+ XContentBuilder expected = newXContentBuilder().startObject().endObject();
+ assertXContentBuilder(expected, sampleWithFilters(includes, excludes));
+ }
+
+ public void testRecursiveObjectsPropertiesWithBothExcludesIncludes() throws IOException {
+ Set<String> includes = singleton("**.en.*");
+ Set<String> excludes = Sets.newHashSet("**.distributors.*.name", "**.street");
+
+ XContentBuilder expected = newXContentBuilder()
+ .startObject()
+ .startObject("properties")
+ .startObject("language")
+ .startObject("en")
+ .field("lang", "English")
+ .field("available", true)
+ .startArray("distributors")
+ .startObject()
+ .field("name", "The Book Shop")
+ .startArray("addresses")
+ .startObject()
+ .field("city", "London")
+ .endObject()
+ .startObject()
+ .field("city", "Stornoway")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Sussex Books House")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertXContentBuilder(expected, sampleWithFilters(includes, excludes));
+ }
+
+ public void testWithLfAtEnd() throws IOException {
+ final Function<XContentBuilder, XContentBuilder> build = builder -> {
+ try {
+ return builder.startObject().startObject("foo").field("bar", "baz").endObject().endObject().prettyPrint().lfAtEnd();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ };
+
+ XContentBuilder expected = build.apply(newXContentBuilder());
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithIncludes("foo")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithExcludes("bar")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilder(singleton("f*"), singleton("baz"))));
+
+ expected = newXContentBuilder().startObject().endObject().prettyPrint().lfAtEnd();
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithExcludes("foo")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilderWithIncludes("bar")));
+ assertXContentBuilder(expected, build.apply(newXContentBuilder(singleton("f*"), singleton("foo"))));
+ }
}
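The net effect of the filtering changes in this file: the (String[] filters, boolean inclusive) pair is replaced everywhere by separate include and exclude sets passed directly to XContentBuilder.builder. A minimal sketch of the new call (the paths are illustrative):

    // Keep everything matching "foo.*" except the excluded "foo.secret" path.
    XContentBuilder builder = XContentBuilder.builder(
            XContentType.JSON.xContent(), singleton("foo.*"), singleton("foo.secret"));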
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
index 8dbefedb24..b4d7cb1152 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
@@ -25,6 +25,8 @@ import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.test.ESTestCase;
+import java.util.Collections;
+
import static org.hamcrest.Matchers.equalTo;
public class FilterPathGeneratorFilteringTests extends ESTestCase {
@@ -135,7 +137,7 @@ public class FilterPathGeneratorFilteringTests extends ESTestCase {
private void assertResult(String input, String filter, boolean inclusive, String expected) throws Exception {
try (BytesStreamOutput os = new BytesStreamOutput()) {
try (FilteringGeneratorDelegate generator = new FilteringGeneratorDelegate(JSON_FACTORY.createGenerator(os),
- new FilterPathBasedFilter(new String[] { filter }, inclusive), true, true)) {
+ new FilterPathBasedFilter(Collections.singleton(filter), inclusive), true, true)) {
try (JsonParser parser = JSON_FACTORY.createParser(replaceQuotes(input))) {
while (parser.nextToken() != null) {
generator.copyCurrentStructure(parser);
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java
index 80cc12b5f3..4eec46d9b2 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java
@@ -19,8 +19,12 @@
package org.elasticsearch.common.xcontent.support.filtering;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.test.ESTestCase;
+import java.util.Set;
+
+import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
@@ -33,7 +37,7 @@ public class FilterPathTests extends ESTestCase {
public void testSimpleFilterPath() {
final String input = "test";
- FilterPath[] filterPaths = FilterPath.compile(input);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -52,7 +56,7 @@ public class FilterPathTests extends ESTestCase {
public void testFilterPathWithSubField() {
final String input = "foo.bar";
- FilterPath[] filterPaths = FilterPath.compile(input);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -76,7 +80,7 @@ public class FilterPathTests extends ESTestCase {
public void testFilterPathWithSubFields() {
final String input = "foo.bar.quz";
- FilterPath[] filterPaths = FilterPath.compile(input);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -103,13 +107,13 @@ public class FilterPathTests extends ESTestCase {
}
public void testEmptyFilterPath() {
- FilterPath[] filterPaths = FilterPath.compile("");
+ FilterPath[] filterPaths = FilterPath.compile(singleton(""));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(0));
}
public void testNullFilterPath() {
- FilterPath[] filterPaths = FilterPath.compile((String) null);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(null));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(0));
}
@@ -117,7 +121,7 @@ public class FilterPathTests extends ESTestCase {
public void testFilterPathWithEscapedDots() {
String input = "w.0.0.t";
- FilterPath[] filterPaths = FilterPath.compile(input);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -149,7 +153,7 @@ public class FilterPathTests extends ESTestCase {
input = "w\\.0\\.0\\.t";
- filterPaths = FilterPath.compile(input);
+ filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -167,7 +171,7 @@ public class FilterPathTests extends ESTestCase {
input = "w\\.0.0\\.t";
- filterPaths = FilterPath.compile(input);
+ filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -188,7 +192,7 @@ public class FilterPathTests extends ESTestCase {
}
public void testSimpleWildcardFilterPath() {
- FilterPath[] filterPaths = FilterPath.compile("*");
+ FilterPath[] filterPaths = FilterPath.compile(singleton("*"));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -206,7 +210,7 @@ public class FilterPathTests extends ESTestCase {
public void testWildcardInNameFilterPath() {
String input = "f*o.bar";
- FilterPath[] filterPaths = FilterPath.compile(input);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -232,7 +236,7 @@ public class FilterPathTests extends ESTestCase {
}
public void testDoubleWildcardFilterPath() {
- FilterPath[] filterPaths = FilterPath.compile("**");
+ FilterPath[] filterPaths = FilterPath.compile(singleton("**"));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -250,7 +254,7 @@ public class FilterPathTests extends ESTestCase {
public void testStartsWithDoubleWildcardFilterPath() {
String input = "**.bar";
- FilterPath[] filterPaths = FilterPath.compile(input);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -274,7 +278,7 @@ public class FilterPathTests extends ESTestCase {
public void testContainsDoubleWildcardFilterPath() {
String input = "foo.**.bar";
- FilterPath[] filterPaths = FilterPath.compile(input);
+ FilterPath[] filterPaths = FilterPath.compile(singleton(input));
assertNotNull(filterPaths);
assertThat(filterPaths, arrayWithSize(1));
@@ -302,7 +306,7 @@ public class FilterPathTests extends ESTestCase {
}
public void testMultipleFilterPaths() {
- String[] inputs = {"foo.**.bar.*", "test.dot\\.ted"};
+ Set<String> inputs = Sets.newHashSet("foo.**.bar.*", "test.dot\\.ted");
FilterPath[] filterPaths = FilterPath.compile(inputs);
assertNotNull(filterPaths);
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java
index 17c2a590ec..3bfaa42188 100644
--- a/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/yaml/YamlXContentTests.java
@@ -19,9 +19,14 @@
package org.elasticsearch.common.xcontent.yaml;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+
import org.elasticsearch.common.xcontent.BaseXContentTestCase;
import org.elasticsearch.common.xcontent.XContentType;
+import java.io.ByteArrayOutputStream;
+
public class YamlXContentTests extends BaseXContentTestCase {
@Override
@@ -29,4 +34,9 @@ public class YamlXContentTests extends BaseXContentTestCase {
return XContentType.YAML;
}
+ public void testBigInteger() throws Exception {
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ JsonGenerator generator = new YAMLFactory().createGenerator(os);
+ doTestBigInteger(generator, os);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
index 4ff4c4cd03..481d15020f 100644
--- a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
@@ -18,9 +18,9 @@
*/
package org.elasticsearch.discovery;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -45,10 +45,10 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase
final boolean fail;
final DiscoveryNode node;
final CyclicBarrier barrier;
- final ESLogger logger;
+ final Logger logger;
final BlockingClusterStatePublishResponseHandler handler;
- public PublishResponder(boolean fail, DiscoveryNode node, CyclicBarrier barrier, ESLogger logger, BlockingClusterStatePublishResponseHandler handler) {
+ public PublishResponder(boolean fail, DiscoveryNode node, CyclicBarrier barrier, Logger logger, BlockingClusterStatePublishResponseHandler handler) {
this.fail = fail;
this.node = node;
diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
index 057b54c7a0..3b436f4541 100644
--- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
@@ -18,13 +18,10 @@
*/
package org.elasticsearch.discovery;
-import org.elasticsearch.Version;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.local.LocalDiscovery;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
-import org.elasticsearch.node.Node;
import org.elasticsearch.test.NoopDiscovery;
/**
diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
index 9acb1f738b..764e363d4d 100644
--- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
@@ -19,6 +19,8 @@
package org.elasticsearch.discovery;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteResponse;
@@ -47,8 +49,8 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import org.elasticsearch.discovery.zen.ping.ZenPing;
@@ -57,6 +59,7 @@ import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.indices.store.IndicesStoreIntegrationIT;
+import org.elasticsearch.monitor.jvm.HotThreads;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -68,17 +71,18 @@ import org.elasticsearch.test.disruption.IntermittentLongGCDisruption;
import org.elasticsearch.test.disruption.LongGCDisruption;
import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.test.disruption.NetworkDisruption.Bridge;
+import org.elasticsearch.test.disruption.NetworkDisruption.DisruptedLinks;
import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDelay;
import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect;
import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType;
import org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive;
-import org.elasticsearch.test.disruption.NetworkDisruption.DisruptedLinks;
import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions;
import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
import org.elasticsearch.test.disruption.SingleNodeDisruption;
import org.elasticsearch.test.disruption.SlowClusterStateProcessing;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
@@ -106,16 +110,19 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
@ESIntegTestCase.SuppressLocalMode
-@TestLogging("_root:DEBUG,cluster.service:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE")
public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
private static final TimeValue DISRUPTION_HEALING_OVERHEAD = TimeValue.timeValueSeconds(40); // we use 30s as timeout in many places.
@@ -160,7 +167,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
private List<String> startCluster(int numberOfNodes, int minimumMasterNode, @Nullable int[] unicastHostsOrdinals) throws
ExecutionException, InterruptedException {
- configureUnicastCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
+ configureCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
List<String> nodes = internalCluster().startNodesAsync(numberOfNodes).get();
ensureStableCluster(numberOfNodes);
@@ -180,6 +187,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly
.put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
+ .put(TcpTransport.TCP_CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this
+ // value and the duration of the disruption and does not recover immediately
+ // when the disruption is stopped. We should make sure we recover faster
+ // than the default of 30s, which would cause ensureGreen and friends to time out
+
.build();
@Override
@@ -187,15 +199,15 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
return Arrays.asList(MockTransportService.TestPlugin.class);
}
- private void configureUnicastCluster(
+ private void configureCluster(
int numberOfNodes,
@Nullable int[] unicastHostsOrdinals,
int minimumMasterNode
) throws ExecutionException, InterruptedException {
- configureUnicastCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
+ configureCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
}
- private void configureUnicastCluster(
+ private void configureCluster(
Settings settings,
int numberOfNodes,
@Nullable int[] unicastHostsOrdinals,
@@ -381,7 +393,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
* This test isolates the master from the rest of the cluster, waits for a new master to be elected, restores the partition
* and verifies that all nodes agree on the new cluster state
*/
- @TestLogging("_root:DEBUG,cluster.service:TRACE,gateway:TRACE,indices.store:TRACE")
+ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.gateway:TRACE,org.elasticsearch.indices.store:TRACE")
public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception {
final List<String> nodes = startCluster(3);
@@ -451,8 +463,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
* <p>
* This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates
*/
- @TestLogging("_root:DEBUG,action.index:TRACE,action.get:TRACE,discovery:TRACE,cluster.service:TRACE,"
- + "indices.recovery:TRACE,indices.cluster:TRACE")
+ @TestLogging("_root:DEBUG,org.elasticsearch.action.index:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE,org.elasticsearch.cluster.service:TRACE,"
+ + "org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.indices.cluster:TRACE")
public void testAckedIndexing() throws Exception {
final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5;
@@ -506,7 +518,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node);
} catch (ElasticsearchException e) {
exceptedExceptions.add(e);
- logger.trace("[{}] failed id [{}] through node [{}]", e, name, id, node);
+ final String docId = id;
+ logger.trace(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e);
} finally {
countDownLatchRef.get().countDown();
logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount());
@@ -514,7 +529,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
} catch (InterruptedException e) {
// fine - semaphore interrupt
} catch (AssertionError | Exception e) {
- logger.info("unexpected exception in background thread of [{}]", e, node);
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), e);
}
}
});
@@ -630,6 +645,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
* that are already following another elected master node. These nodes should reject this cluster state and refuse
* to follow the stale master.
*/
+ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE")
public void testStaleMasterNotHijackingMajority() throws Exception {
// 3 node cluster with unicast discovery and minimum_master_nodes set to 2:
final List<String> nodes = startCluster(3, 2);
@@ -686,7 +702,19 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
// Wait for the majority side to get stable
assertDifferentMaster(majoritySide.get(0), oldMasterNode);
assertDifferentMaster(majoritySide.get(1), oldMasterNode);
- assertDiscoveryCompleted(majoritySide);
+
// the test periodically trips on the following assertion. To find out which threads are blocking the nodes from making
// progress, we print a stack dump
+ boolean failed = true;
+ try {
+ assertDiscoveryCompleted(majoritySide);
+ failed = false;
+ } finally {
+ if (failed) {
+ logger.error("discovery failed to complete, probably caused by a blocked thread: {}",
+ new HotThreads().busiestThreads(Integer.MAX_VALUE).ignoreIdleThreads(false).detect());
+ }
+ }
// The old master node is frozen, but here we submit a cluster state update task that doesn't get executed,
// but will be queued and once the old master node un-freezes it gets executed.
@@ -701,7 +729,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
@Override
public void onFailure(String source, Exception e) {
- logger.warn("failure [{}]", e, source);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure [{}]", source), e);
}
});
@@ -1006,7 +1034,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
}
public void testClusterFormingWithASlowNode() throws Exception {
- configureUnicastCluster(3, null, 2);
+ configureCluster(3, null, 2);
SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(random(), 0, 0, 1000, 2000);
@@ -1069,7 +1097,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
*/
public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception {
// don't use DEFAULT settings (which can cause node disconnects on a slow CI machine)
- configureUnicastCluster(Settings.EMPTY, 3, null, 1);
+ configureCluster(Settings.EMPTY, 3, null, 1);
InternalTestCluster.Async<String> masterNodeFuture = internalCluster().startMasterOnlyNodeAsync();
InternalTestCluster.Async<String> node_1Future = internalCluster().startDataOnlyNodeAsync();
@@ -1110,7 +1138,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception {
// test for https://github.com/elastic/elasticsearch/issues/8823
- configureUnicastCluster(2, null, 1);
+ configureCluster(2, null, 1);
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNode(Settings.EMPTY);
@@ -1141,7 +1169,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed
.build();
final String idxName = "test";
- configureUnicastCluster(settings, 3, null, 2);
+ configureCluster(settings, 3, null, 2);
InternalTestCluster.Async<List<String>> masterNodes = internalCluster().startMasterOnlyNodesAsync(2);
InternalTestCluster.Async<String> dataNode = internalCluster().startDataOnlyNodeAsync();
dataNode.get();
@@ -1170,6 +1198,61 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
assertFalse(client().admin().indices().prepareExists(idxName).get().isExists());
}
+ public void testElectMasterWithLatestVersion() throws Exception {
+ configureCluster(3, null, 2);
+ final Set<String> nodes = new HashSet<>(internalCluster().startNodesAsync(3).get());
+ ensureStableCluster(3);
+ ServiceDisruptionScheme isolateAllNodes = new NetworkDisruption(new NetworkDisruption.IsolateAllNodes(nodes), new NetworkDisconnect());
+ internalCluster().setDisruptionScheme(isolateAllNodes);
+
+ logger.info("--> forcing a complete election to make sure \"preferred\" master is elected");
+ isolateAllNodes.startDisrupting();
+ for (String node: nodes) {
+ assertNoMaster(node);
+ }
+ isolateAllNodes.stopDisrupting();
+ ensureStableCluster(3);
+ final String preferredMasterName = internalCluster().getMasterName();
+ final DiscoveryNode preferredMaster = internalCluster().clusterService(preferredMasterName).localNode();
+ for (String node: nodes) {
+ DiscoveryNode discoveryNode = internalCluster().clusterService(node).localNode();
+ assertThat(discoveryNode.getId(), greaterThanOrEqualTo(preferredMaster.getId()));
+ }
+
+ logger.info("--> preferred master is {}", preferredMaster);
+ final Set<String> nonPreferredNodes = new HashSet<>(nodes);
+ nonPreferredNodes.remove(preferredMasterName);
+ final ServiceDisruptionScheme isolatePreferredMaster =
+ new NetworkDisruption(
+ new NetworkDisruption.TwoPartitions(
+ Collections.singleton(preferredMasterName), nonPreferredNodes),
+ new NetworkDisconnect());
+ internalCluster().setDisruptionScheme(isolatePreferredMaster);
+ isolatePreferredMaster.startDisrupting();
+
+ assertAcked(client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings(
+ INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1,
+ INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0
+ ));
+
+ internalCluster().clearDisruptionScheme(false);
+ internalCluster().setDisruptionScheme(isolateAllNodes);
+
+ logger.info("--> forcing a complete election again");
+ isolateAllNodes.startDisrupting();
+ for (String node: nodes) {
+ assertNoMaster(node);
+ }
+
+ isolateAllNodes.stopDisrupting();
+
+ final ClusterState state = client().admin().cluster().prepareState().get().getState();
+ if (state.metaData().hasIndex("test") == false) {
+ fail("index 'test' was lost. current cluster state: " + state.prettyPrint());
+ }
+
+ }
+
protected NetworkDisruption addRandomDisruptionType(TwoPartitions partitions) {
final NetworkLinkDisruptionType disruptionType;
if (randomBoolean()) {
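
Note on the logging hunks above: with the move from ESLogger to the log4j2 Logger, logging an exception together with a formatted message goes through a lazily evaluated Supplier of ParameterizedMessage. A minimal self-contained sketch of that pattern, with illustrative class and method names that are not from this commit:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class SupplierLoggingSketch {
    private static final Logger logger = LogManager.getLogger(SupplierLoggingSketch.class);

    void onIndexFailure(String name, String docId, String node, Exception e) {
        // The Supplier cast selects the log4j2 overload that takes a lazily built
        // message plus a Throwable; the ParameterizedMessage is only constructed
        // when TRACE is actually enabled, so hot paths pay no formatting cost.
        logger.trace(
            (Supplier<?>) () -> new ParameterizedMessage(
                "[{}] failed id [{}] through node [{}]", name, docId, node), e);
    }
}

Variables captured by the lambda must be effectively final, which is why the hunk above introduces final String docId = id; before the trace call.
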
diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java
index b1b7749d88..d51447c929 100644
--- a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java
@@ -147,7 +147,7 @@ public class ZenFaultDetectionTests extends ESTestCase {
return version;
}
},
- threadPool);
+ threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
return transportService;
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java
index b31b0cbaa5..737607df6b 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java
@@ -23,7 +23,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ElectMasterService.MasterCandidate;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -31,6 +31,10 @@ import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
public class ElectMasterServiceTests extends ESTestCase {
@@ -55,6 +59,22 @@ public class ElectMasterServiceTests extends ESTestCase {
return nodes;
}
+ List<MasterCandidate> generateRandomCandidates() {
+ int count = scaledRandomIntBetween(1, 100);
+ ArrayList<MasterCandidate> candidates = new ArrayList<>(count);
+ for (int i = 0; i < count; i++) {
+ Set<DiscoveryNode.Role> roles = new HashSet<>();
+ roles.add(DiscoveryNode.Role.MASTER);
+ DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(),
+ roles, Version.CURRENT);
+ candidates.add(new MasterCandidate(node, randomBoolean() ? MasterCandidate.UNRECOVERED_CLUSTER_VERSION : randomPositiveLong()));
+ }
+
+ Collections.shuffle(candidates, random());
+ return candidates;
+ }
+
+
public void testSortByMasterLikelihood() {
List<DiscoveryNode> nodes = generateRandomNodes();
List<DiscoveryNode> sortedNodes = electMasterService().sortByMasterLikelihood(nodes);
@@ -69,36 +89,53 @@ public class ElectMasterServiceTests extends ESTestCase {
}
prevNode = node;
}
+ }
+ public void testTieBreakActiveMasters() {
+ List<DiscoveryNode> nodes = generateRandomCandidates().stream().map(MasterCandidate::getNode).collect(Collectors.toList());
+ DiscoveryNode bestMaster = electMasterService().tieBreakActiveMasters(nodes);
+ for (DiscoveryNode node: nodes) {
+ if (node.equals(bestMaster) == false) {
+ assertTrue(bestMaster.getId().compareTo(node.getId()) < 0);
+ }
+ }
}
- public void testElectMaster() {
- List<DiscoveryNode> nodes = generateRandomNodes();
+ public void testHasEnoughNodes() {
+ List<DiscoveryNode> nodes = rarely() ? Collections.emptyList() : generateRandomNodes();
ElectMasterService service = electMasterService();
- int min_master_nodes = randomIntBetween(0, nodes.size());
- service.minimumMasterNodes(min_master_nodes);
+ int masterNodes = (int) nodes.stream().filter(DiscoveryNode::isMasterNode).count();
+ service.minimumMasterNodes(randomIntBetween(-1, masterNodes));
+ assertThat(service.hasEnoughMasterNodes(nodes), equalTo(masterNodes > 0));
+ service.minimumMasterNodes(masterNodes + 1 + randomIntBetween(0, nodes.size()));
+ assertFalse(service.hasEnoughMasterNodes(nodes));
+ }
- int master_nodes = 0;
- for (DiscoveryNode node : nodes) {
- if (node.isMasterNode()) {
- master_nodes++;
- }
- }
- DiscoveryNode master = null;
- if (service.hasEnoughMasterNodes(nodes)) {
- master = service.electMaster(nodes);
- }
+ public void testHasEnoughCandidates() {
+ List<MasterCandidate> candidates = rarely() ? Collections.emptyList() : generateRandomCandidates();
+ ElectMasterService service = electMasterService();
+ service.minimumMasterNodes(randomIntBetween(-1, candidates.size()));
+ assertThat(service.hasEnoughCandidates(candidates), equalTo(candidates.size() > 0));
+ service.minimumMasterNodes(candidates.size() + 1 + randomIntBetween(0, candidates.size()));
+ assertFalse(service.hasEnoughCandidates(candidates));
+ }
- if (master_nodes == 0) {
- assertNull(master);
- } else if (min_master_nodes > 0 && master_nodes < min_master_nodes) {
- assertNull(master);
- } else {
- assertNotNull(master);
- for (DiscoveryNode node : nodes) {
- if (node.isMasterNode()) {
- assertTrue(master.getId().compareTo(node.getId()) <= 0);
- }
+ public void testElectMaster() {
+ List<MasterCandidate> candidates = generateRandomCandidates();
+ ElectMasterService service = electMasterService();
+ int minMasterNodes = randomIntBetween(0, candidates.size());
+ service.minimumMasterNodes(minMasterNodes);
+ MasterCandidate master = service.electMaster(candidates);
+ assertNotNull(master);
+ for (MasterCandidate candidate : candidates) {
+ if (candidate.getNode().equals(master.getNode())) {
+ // nothing much to test here
+ } else if (candidate.getClusterStateVersion() == master.getClusterStateVersion()) {
+ assertThat("candidate " + candidate + " has a lower or equal id than master " + master, candidate.getNode().getId(),
+ greaterThan(master.getNode().getId()));
+ } else {
+ assertThat("candidate " + master + " has a higher cluster state version than candidate " + candidate,
+ master.getClusterStateVersion(), greaterThan(candidate.getClusterStateVersion()));
}
}
}
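
The rewritten election tests above encode the new candidate ordering: a candidate with a higher cluster state version beats any candidate with a lower one, and among candidates with equal versions the lexicographically smallest node id wins. A minimal comparator sketch consistent with those assertions, illustrative only and not the actual ElectMasterService code:

import java.util.Comparator;
import java.util.List;

class CandidateOrderingSketch {
    static final class Candidate {
        final String nodeId;
        final long clusterStateVersion;
        Candidate(String nodeId, long clusterStateVersion) {
            this.nodeId = nodeId;
            this.clusterStateVersion = clusterStateVersion;
        }
    }

    // Higher cluster state version wins; on a tie, the smallest node id wins,
    // matching the assertions in testElectMaster and testTieBreakActiveMasters.
    static Candidate electMaster(List<Candidate> candidates) {
        return candidates.stream()
            .min(Comparator.<Candidate>comparingLong(c -> c.clusterStateVersion)
                .reversed()
                .thenComparing(c -> c.nodeId))
            .orElse(null);
    }
}
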
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
index b71310e2f6..907d378699 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
@@ -18,7 +18,8 @@
*/
package org.elasticsearch.discovery.zen;
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
@@ -42,7 +43,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.BaseFuture;
import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
@@ -77,19 +77,20 @@ import java.util.stream.StreamSupport;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.shuffle;
+import static org.elasticsearch.cluster.ESAllocationTestCase.createAllocationService;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
+import static org.elasticsearch.cluster.routing.RoutingTableTests.updateActiveAllocations;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
-import static org.elasticsearch.test.ESAllocationTestCase.createAllocationService;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
-@TestLogging("discovery.zen:TRACE")
+@TestLogging("org.elasticsearch.discovery.zen:TRACE,org.elasticsearch.cluster.service:TRACE")
public class NodeJoinControllerTests extends ESTestCase {
private static ThreadPool threadPool;
@@ -597,7 +598,6 @@ public class NodeJoinControllerTests extends ESTestCase {
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
- stateBuilder.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetaData.getIndex());
RoutingTable.Builder routing = new RoutingTable.Builder();
routing.addAsNew(indexMetaData);
@@ -607,20 +607,23 @@ public class NodeJoinControllerTests extends ESTestCase {
final DiscoveryNode primaryNode = randomBoolean() ? masterNode : otherNode;
final DiscoveryNode replicaNode = primaryNode.equals(masterNode) ? otherNode : masterNode;
final boolean primaryStarted = randomBoolean();
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, primaryNode.getId(), null, null, true,
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, primaryNode.getId(), null, true,
primaryStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
primaryStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "getting there")));
if (primaryStarted) {
boolean replicaStarted = randomBoolean();
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, replicaNode.getId(), null, null, false,
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, replicaNode.getId(), null, false,
replicaStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
replicaStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "getting there")));
} else {
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, null, null, null, false,
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, null, null, false,
ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "life sucks")));
}
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
- stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
+ IndexRoutingTable indexRoutingTable = indexRoutingTableBuilder.build();
+ IndexMetaData updatedIndexMetaData = updateActiveAllocations(indexRoutingTable, indexMetaData);
+ stateBuilder.metaData(MetaData.builder().put(updatedIndexMetaData, false).generateClusterUuidIfNeeded())
+ .routingTable(RoutingTable.builder().add(indexRoutingTable).build());
}
setState(clusterService, stateBuilder.build());
@@ -720,7 +723,7 @@ public class NodeJoinControllerTests extends ESTestCase {
@Override
public void onFailure(Exception e) {
- logger.error("unexpected error for {}", e, future);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error for {}", future), e);
future.markAsFailed(e);
}
});
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java
index 35335a8ede..4492cdb52e 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java
@@ -26,9 +26,7 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -134,7 +132,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
final AllocationService allocationService = mock(AllocationService.class);
when(allocationService.deassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class)))
- .thenReturn(mock(RoutingAllocation.Result.class));
+ .thenAnswer(im -> im.getArguments()[0]);
final BiFunction<ClusterState, String, ClusterState> rejoin = (cs, r) -> {
fail("rejoin should not be invoked");
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
index 6248df7370..d9a8c9be7f 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
@@ -34,14 +34,12 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
@@ -60,10 +58,8 @@ import org.hamcrest.Matchers;
import org.junit.Before;
import java.io.IOException;
-import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.CountDownLatch;
@@ -77,8 +73,6 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;
-import static org.hamcrest.Matchers.sameInstance;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
@ESIntegTestCase.SuppressLocalMode
@@ -120,7 +114,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth()
.setWaitForEvents(Priority.LANGUID)
.setWaitForNodes("4")
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
assertThat(clusterHealthResponse.isTimedOut(), is(false));
@@ -293,44 +287,6 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
}
}
- public void testHandleNodeJoin_incompatibleMinVersion() throws UnknownHostException {
- Settings nodeSettings = Settings.builder()
- .put("discovery.type", "zen") // <-- To override the local setting if set externally
- .build();
- String nodeName = internalCluster().startNode(nodeSettings);
- ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName);
- ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName);
- DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0),
- emptyMap(), emptySet(), previousMajorVersion);
- final AtomicReference<IllegalStateException> holder = new AtomicReference<>();
- zenDiscovery.handleJoinRequest(node, clusterService.state(), new MembershipAction.JoinCallback() {
- @Override
- public void onSuccess() {
- }
-
- @Override
- public void onFailure(Exception e) {
- holder.set((IllegalStateException) e);
- }
- });
-
- assertThat(holder.get(), notNullValue());
- assertThat(holder.get().getMessage(), equalTo("Can't handle join request from a node with a version [" + previousMajorVersion
- + "] that is lower than the minimum compatible version [" + Version.CURRENT.minimumCompatibilityVersion() + "]"));
- }
-
- public void testJoinElectedMaster_incompatibleMinVersion() {
- ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY);
-
- DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), emptyMap(),
- Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
- assertThat(electMasterService.electMaster(Collections.singletonList(node)), sameInstance(node));
- node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), emptyMap(), emptySet(), previousMajorVersion);
- assertThat("Can't join master because version " + previousMajorVersion
- + " is lower than the minimum compatable version " + Version.CURRENT + " can support",
- electMasterService.electMaster(Collections.singletonList(node)), nullValue());
- }
-
public void testDiscoveryStats() throws IOException {
String expectedStatsJsonResponse = "{\n" +
" \"discovery\" : {\n" +
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
index ba4c14c205..a7291dc373 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
@@ -19,31 +19,48 @@
package org.elasticsearch.discovery.zen;
+import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Version;
+import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
+import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNode.Role;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.discovery.zen.ping.ZenPingService;
+import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.AssertingAckListener;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
+import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING;
import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
+import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
+import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
-/**
- */
public class ZenDiscoveryUnitTests extends ESTestCase {
public void testShouldIgnoreNewClusterState() {
@@ -107,10 +124,10 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
ArrayList<DiscoveryNode> masterNodes = new ArrayList<>();
ArrayList<DiscoveryNode> allNodes = new ArrayList<>();
for (int i = randomIntBetween(10, 20); i >= 0; i--) {
- Set<DiscoveryNode.Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values())));
+ Set<Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(Role.values())));
DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(),
roles, Version.CURRENT);
- responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomBoolean()));
+ responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomLong()));
allNodes.add(node);
if (node.isMasterNode()) {
masterNodes.add(node);
@@ -118,8 +135,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
}
boolean ignore = randomBoolean();
- List<ZenPing.PingResponse> filtered = ZenDiscovery.filterPingResponses(
- responses.toArray(new ZenPing.PingResponse[responses.size()]), ignore, logger);
+ List<ZenPing.PingResponse> filtered = ZenDiscovery.filterPingResponses(responses, ignore, logger);
final List<DiscoveryNode> filteredNodes = filtered.stream().map(ZenPing.PingResponse::node).collect(Collectors.toList());
if (ignore) {
assertThat(filteredNodes, equalTo(masterNodes));
@@ -127,4 +143,97 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
assertThat(filteredNodes, equalTo(allNodes));
}
}
+
+ public void testNodesUpdatedAfterClusterStatePublished() throws Exception {
+ ThreadPool threadPool = new TestThreadPool(getClass().getName());
+ // randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure
+ int minMasterNodes = randomBoolean() ? 3 : 1;
+ Settings settings = Settings.builder()
+ .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build();
+
+ ArrayList<Closeable> toClose = new ArrayList<>();
+ try {
+ Set<DiscoveryNode> expectedFDNodes = null;
+
+ final MockTransportService masterTransport = MockTransportService.local(settings, Version.CURRENT, threadPool);
+ masterTransport.start();
+ DiscoveryNode masterNode = new DiscoveryNode("master", masterTransport.boundAddress().publishAddress(), Version.CURRENT);
+ toClose.add(masterTransport);
+ masterTransport.setLocalNode(masterNode);
+ ClusterState state = ClusterStateCreationUtils.state(masterNode, masterNode, masterNode);
+ // build the zen discovery and cluster service
+ ClusterService masterClusterService = createClusterService(threadPool, masterNode);
+ toClose.add(masterClusterService);
+ // TODO: the cluster name shouldn't be stored twice in the cluster service, but for now, work around it
+ state = ClusterState.builder(masterClusterService.getClusterName()).nodes(state.nodes()).build();
+ setState(masterClusterService, state);
+ ZenDiscovery masterZen = buildZenDiscovery(settings, masterTransport, masterClusterService, threadPool);
+ toClose.add(masterZen);
+ masterTransport.acceptIncomingRequests();
+
+ final MockTransportService otherTransport = MockTransportService.local(settings, Version.CURRENT, threadPool);
+ otherTransport.start();
+ toClose.add(otherTransport);
+ DiscoveryNode otherNode = new DiscoveryNode("other", otherTransport.boundAddress().publishAddress(), Version.CURRENT);
+ otherTransport.setLocalNode(otherNode);
+ final ClusterState otherState = ClusterState.builder(masterClusterService.getClusterName())
+ .nodes(DiscoveryNodes.builder().add(otherNode).localNodeId(otherNode.getId())).build();
+ ClusterService otherClusterService = createClusterService(threadPool, masterNode);
+ toClose.add(otherClusterService);
+ setState(otherClusterService, otherState);
+ ZenDiscovery otherZen = buildZenDiscovery(settings, otherTransport, otherClusterService, threadPool);
+ toClose.add(otherZen);
+ otherTransport.acceptIncomingRequests();
+
+
+ masterTransport.connectToNode(otherNode);
+ otherTransport.connectToNode(masterNode);
+
+ // a new cluster state with a new discovery node (we will test if the cluster state
+ // was updated by the presence of this node in NodesFaultDetection)
+ ClusterState newState = ClusterState.builder(masterClusterService.state()).incrementVersion().nodes(
+ DiscoveryNodes.builder(state.nodes()).add(otherNode).masterNodeId(masterNode.getId())
+ ).build();
+
+ try {
+ // publishing a new cluster state
+ ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state);
+ AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1);
+ expectedFDNodes = masterZen.getFaultDetectionNodes();
+ masterZen.publish(clusterChangedEvent, listener);
+ listener.await(1, TimeUnit.HOURS);
+ // publish was a success, update expected FD nodes based on new cluster state
+ expectedFDNodes = fdNodesForState(newState, masterNode);
+ } catch (Discovery.FailedToCommitClusterStateException e) {
+ // not successful, so expectedFDNodes above should remain what it was originally assigned
+ assertEquals(3, minMasterNodes); // ensure min master nodes is the higher value, otherwise we shouldn't fail
+ }
+
+ assertEquals(expectedFDNodes, masterZen.getFaultDetectionNodes());
+ } finally {
+ IOUtils.close(toClose);
+ terminate(threadPool);
+ }
+ }
+
+ private ZenDiscovery buildZenDiscovery(Settings settings, TransportService service, ClusterService clusterService, ThreadPool threadPool) {
+ ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ ZenPingService zenPingService = new ZenPingService(settings, Collections.emptySet());
+ ElectMasterService electMasterService = new ElectMasterService(settings);
+ ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService,
+ clusterSettings, zenPingService, electMasterService);
+ zenDiscovery.start();
+ return zenDiscovery;
+ }
+
+ private Set<DiscoveryNode> fdNodesForState(ClusterState clusterState, DiscoveryNode localNode) {
+ final Set<DiscoveryNode> discoveryNodes = new HashSet<>();
+ clusterState.getNodes().getNodes().valuesIt().forEachRemaining(discoveryNode -> {
+ // the local node isn't part of the nodes that are pinged (don't ping ourselves)
+ if (discoveryNode.getId().equals(localNode.getId()) == false) {
+ discoveryNodes.add(discoveryNode);
+ }
+ });
+ return discoveryNodes;
+ }
}
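
The new testNodesUpdatedAfterClusterStatePublished exercises the commit rule: the publish succeeds only when enough master-eligible nodes are available, so with two nodes and minimum_master_nodes randomly set to 3 it must fail with FailedToCommitClusterStateException, leaving the fault-detection node set unchanged. A trivial sketch of that rule, under the assumption that commit requires at least minimum_master_nodes acks:

class CommitRuleSketch {
    // With 2 nodes and minimum_master_nodes = 3 this returns false, which is
    // the failure branch the test asserts on.
    static boolean canCommit(int ackedMasterNodes, int minimumMasterNodes) {
        return ackedMasterNodes >= minimumMasterNodes;
    }
}
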
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
index 72674f44e3..2275756e8e 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
@@ -29,6 +29,7 @@ import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.List;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
@@ -39,7 +40,7 @@ public class ZenPingTests extends ESTestCase {
DiscoveryNode[] nodes = new DiscoveryNode[randomIntBetween(1, 30)];
long maxIdPerNode[] = new long[nodes.length];
DiscoveryNode masterPerNode[] = new DiscoveryNode[nodes.length];
- boolean hasJoinedOncePerNode[] = new boolean[nodes.length];
+ long clusterStateVersionPerNode[] = new long[nodes.length];
ArrayList<ZenPing.PingResponse> pings = new ArrayList<>();
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new DiscoveryNode("" + i, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
@@ -51,9 +52,9 @@ public class ZenPingTests extends ESTestCase {
if (randomBoolean()) {
masterNode = nodes[randomInt(nodes.length - 1)];
}
- boolean hasJoinedOnce = randomBoolean();
+ long clusterStateVersion = randomLong();
ZenPing.PingResponse ping = new ZenPing.PingResponse(nodes[node], masterNode, ClusterName.CLUSTER_NAME_SETTING.
- getDefault(Settings.EMPTY), hasJoinedOnce);
+ getDefault(Settings.EMPTY), clusterStateVersion);
if (rarely()) {
// ignore some pings
continue;
@@ -61,7 +62,7 @@ public class ZenPingTests extends ESTestCase {
// update max ping info
maxIdPerNode[node] = ping.id();
masterPerNode[node] = masterNode;
- hasJoinedOncePerNode[node] = hasJoinedOnce;
+ clusterStateVersionPerNode[node] = clusterStateVersion;
pings.add(ping);
}
@@ -69,15 +70,15 @@ public class ZenPingTests extends ESTestCase {
Collections.shuffle(pings, random());
ZenPing.PingCollection collection = new ZenPing.PingCollection();
- collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()]));
+ collection.addPings(pings);
- ZenPing.PingResponse[] aggregate = collection.toArray();
+ List<ZenPing.PingResponse> aggregate = collection.toList();
for (ZenPing.PingResponse ping : aggregate) {
int nodeId = Integer.parseInt(ping.node().getId());
assertThat(maxIdPerNode[nodeId], equalTo(ping.id()));
assertThat(masterPerNode[nodeId], equalTo(ping.master()));
- assertThat(hasJoinedOncePerNode[nodeId], equalTo(ping.hasJoinedOnce()));
+ assertThat(clusterStateVersionPerNode[nodeId], equalTo(ping.getClusterStateVersion()));
maxIdPerNode[nodeId] = -1; // mark as seen
}
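
The loop above relies on ZenPing.PingCollection keeping, for each responding node, only the ping with the highest id (hence entries are marked as seen with -1). A rough sketch of that per-node reduction, with assumed field names and purely for illustration:

import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class PingCollectionSketch {
    static final class Ping {
        final String nodeId;
        final long id;
        Ping(String nodeId, long id) { this.nodeId = nodeId; this.id = id; }
    }

    // Keep only the highest-id ping per node, which is what the test's
    // maxIdPerNode bookkeeping checks against.
    static Collection<Ping> latestPerNode(List<Ping> pings) {
        Map<String, Ping> byNode = new HashMap<>();
        for (Ping ping : pings) {
            byNode.merge(ping.nodeId, ping, (a, b) -> a.id >= b.id ? a : b);
        }
        return byNode.values();
    }
}
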
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
index ea5779c33b..bdffb5f99d 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
@@ -20,6 +20,9 @@
package org.elasticsearch.discovery.zen.ping.unicast;
import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -31,7 +34,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
@@ -45,16 +48,18 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import java.net.InetSocketAddress;
+import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
+import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
-public class UnicastZenPingIT extends ESTestCase {
+public class UnicastZenPingTests extends ESTestCase {
public void testSimplePings() throws InterruptedException {
int startPort = 11000 + randomIntBetween(0, 1000);
int endPort = startPort + 10;
@@ -78,6 +83,8 @@ public class UnicastZenPingIT extends ESTestCase {
Version versionD = VersionUtils.randomVersionBetween(random(), previousVersion.minimumCompatibilityVersion(), previousVersion);
NetworkHandle handleD = startServices(settingsMismatch, threadPool, networkService, "UZP_D", versionD);
+ final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomPositiveLong()).build();
+
Settings hostsSettings = Settings.builder()
.putArray("discovery.zen.ping.unicast.hosts",
NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())),
@@ -96,8 +103,8 @@ public class UnicastZenPingIT extends ESTestCase {
}
@Override
- public boolean nodeHasJoinedClusterOnce() {
- return false;
+ public ClusterState clusterState() {
+ return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
}
});
zenPingA.start();
@@ -110,8 +117,8 @@ public class UnicastZenPingIT extends ESTestCase {
}
@Override
- public boolean nodeHasJoinedClusterOnce() {
- return true;
+ public ClusterState clusterState() {
+ return state;
}
});
zenPingB.start();
@@ -130,8 +137,8 @@ public class UnicastZenPingIT extends ESTestCase {
}
@Override
- public boolean nodeHasJoinedClusterOnce() {
- return false;
+ public ClusterState clusterState() {
+ return state;
}
});
zenPingC.start();
@@ -144,36 +151,38 @@ public class UnicastZenPingIT extends ESTestCase {
}
@Override
- public boolean nodeHasJoinedClusterOnce() {
- return false;
+ public ClusterState clusterState() {
+ return state;
}
});
zenPingD.start();
try {
logger.info("ping from UZP_A");
- ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
- assertThat(pingResponses.length, equalTo(1));
- assertThat(pingResponses[0].node().getId(), equalTo("UZP_B"));
- assertTrue(pingResponses[0].hasJoinedOnce());
+ Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
+ assertThat(pingResponses.size(), equalTo(1));
+ ZenPing.PingResponse ping = pingResponses.iterator().next();
+ assertThat(ping.node().getId(), equalTo("UZP_B"));
+ assertThat(ping.getClusterStateVersion(), equalTo(state.version()));
assertCounters(handleA, handleA, handleB, handleC, handleD);
// ping again, this time from B,
logger.info("ping from UZP_B");
pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1));
- assertThat(pingResponses.length, equalTo(1));
- assertThat(pingResponses[0].node().getId(), equalTo("UZP_A"));
- assertFalse(pingResponses[0].hasJoinedOnce());
+ assertThat(pingResponses.size(), equalTo(1));
+ ping = pingResponses.iterator().next();
+ assertThat(ping.node().getId(), equalTo("UZP_A"));
+ assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION));
assertCounters(handleB, handleA, handleB, handleC, handleD);
logger.info("ping from UZP_C");
pingResponses = zenPingC.pingAndWait(TimeValue.timeValueSeconds(1));
- assertThat(pingResponses.length, equalTo(0));
+ assertThat(pingResponses.size(), equalTo(0));
assertCounters(handleC, handleA, handleB, handleC, handleD);
logger.info("ping from UZP_D");
pingResponses = zenPingD.pingAndWait(TimeValue.timeValueSeconds(1));
- assertThat(pingResponses.length, equalTo(0));
+ assertThat(pingResponses.size(), equalTo(0));
assertCounters(handleD, handleA, handleB, handleC, handleD);
} finally {
zenPingA.close();
@@ -201,7 +210,8 @@ public class UnicastZenPingIT extends ESTestCase {
Version version) {
MockTcpTransport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), networkService, version);
- final TransportService transportService = new TransportService(settings, transport, threadPool);
+ final TransportService transportService = new TransportService(settings, transport, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();
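
The assertions in the renamed test pin down which version a ping advertises: a node whose cluster state still carries STATE_NOT_RECOVERED_BLOCK reports the ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION sentinel, while a recovered node reports its actual cluster state version. A trivial sketch of that rule; the helper name and the sentinel value here are assumptions:

class PingVersionSketch {
    static final long UNRECOVERED_CLUSTER_VERSION = -1; // assumed value; the real constant lives in ElectMasterService.MasterCandidate

    static long advertisedVersion(long clusterStateVersion, boolean stateNotRecovered) {
        // An unrecovered node cannot vouch for any committed cluster state,
        // so it advertises the sentinel instead of a real version.
        return stateNotRecovered ? UNRECOVERED_CLUSTER_VERSION : clusterStateVersion;
    }
}
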
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java
index b1658845af..50ec06694f 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.discovery.zen.publish;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -36,7 +37,6 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
@@ -82,7 +82,7 @@ import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
-@TestLogging("discovery.zen.publish:TRACE")
+@TestLogging("org.elasticsearch.discovery.zen.publish:TRACE")
public class PublishClusterStateActionTests extends ESTestCase {
private static final ClusterName CLUSTER_NAME = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
@@ -98,9 +98,9 @@ public class PublishClusterStateActionTests extends ESTestCase {
public volatile ClusterState clusterState;
- private final ESLogger logger;
+ private final Logger logger;
- public MockNode(DiscoveryNode discoveryNode, MockTransportService service, @Nullable ClusterStateListener listener, ESLogger logger) {
+ public MockNode(DiscoveryNode discoveryNode, MockTransportService service, @Nullable ClusterStateListener listener, Logger logger) {
this.discoveryNode = discoveryNode;
this.service = service;
this.listener = listener;
@@ -145,21 +145,22 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
public MockNode createMockNode(final String name) throws Exception {
- return createMockNode(name, Settings.EMPTY);
+ return createMockNode(name, Settings.EMPTY, null);
}
- public MockNode createMockNode(String name, Settings settings) throws Exception {
- return createMockNode(name, settings, null);
+ public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception {
+ return createMockNode(name, basSettings, listener, threadPool, logger, nodes);
}
- public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception {
+ public static MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener,
+ ThreadPool threadPool, Logger logger, Map<String, MockNode> nodes) throws Exception {
final Settings settings = Settings.builder()
.put("name", name)
.put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
.put(basSettings)
.build();
- MockTransportService service = buildTransportService(settings);
+ MockTransportService service = buildTransportService(settings, threadPool);
DiscoveryNode discoveryNode = DiscoveryNode.createLocal(settings, service.boundAddress().publishAddress(),
NodeEnvironment.generateNodeId(settings));
MockNode node = new MockNode(discoveryNode, service, listener, logger);
@@ -222,20 +223,19 @@ public class PublishClusterStateActionTests extends ESTestCase {
public void tearDown() throws Exception {
super.tearDown();
for (MockNode curNode : nodes.values()) {
- curNode.action.close();
curNode.service.close();
}
terminate(threadPool);
}
- protected MockTransportService buildTransportService(Settings settings) {
- MockTransportService transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
+ private static MockTransportService buildTransportService(Settings settings, ThreadPool threadPool) {
+ MockTransportService transportService = MockTransportService.local(settings, Version.CURRENT, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
return transportService;
}
- protected MockPublishAction buildPublishClusterStateAction(
+ private static MockPublishAction buildPublishClusterStateAction(
Settings settings,
MockTransportService transportService,
Supplier<ClusterState> clusterStateSupplier,
@@ -253,8 +253,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
public void testSimpleClusterStatePublishing() throws Exception {
- MockNode nodeA = createMockNode("nodeA", Settings.EMPTY).setAsMaster();
- MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
+ MockNode nodeA = createMockNode("nodeA").setAsMaster();
+ MockNode nodeB = createMockNode("nodeB");
// Initial cluster state
ClusterState clusterState = nodeA.clusterState;
@@ -282,7 +282,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
// Adding new node - this node should get full cluster state while nodeB should still be getting diffs
- MockNode nodeC = createMockNode("nodeC", Settings.EMPTY);
+ MockNode nodeC = createMockNode("nodeC");
// cluster state update 3 - register node C
previousClusterState = clusterState;
@@ -336,7 +336,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
fail("Shouldn't send cluster state to myself");
}).setAsMaster();
- MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
+ MockNode nodeB = createMockNode("nodeB");
// Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
@@ -444,7 +444,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
}).setAsMaster();
- MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
+ MockNode nodeB = createMockNode("nodeB");
// Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
@@ -495,7 +495,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
final int dataNodes = randomIntBetween(0, 5);
final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build();
for (int i = 0; i < dataNodes; i++) {
- discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings).discoveryNode);
+ discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings, null).discoveryNode);
}
discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
@@ -521,7 +521,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h")
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing
- MockNode master = createMockNode("master", settings.build());
+ MockNode master = createMockNode("master", settings.build(), null);
// randomize things a bit
int[] nodeTypes = new int[goodNodes + errorNodes + timeOutNodes];
@@ -551,7 +551,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter
for (int i = 0; i < dataNodes; i++) {
- final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build());
+ final MockNode mockNode = createMockNode("data_" + i,
+ Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(), null);
discoveryNodesBuilder.add(mockNode.discoveryNode);
if (randomBoolean()) {
// we really don't care - just chaos monkey
@@ -726,8 +727,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
Settings settings = Settings.builder()
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short timeout so we will sometimes commit and sometimes time out
- MockNode master = createMockNode("master", settings);
- MockNode node = createMockNode("node", settings);
+ MockNode master = createMockNode("master", settings, null);
+ MockNode node = createMockNode("node", settings, null);
ClusterState state = ClusterState.builder(master.clusterState)
.nodes(DiscoveryNodes.builder(master.clusterState.nodes()).add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build();
@@ -843,7 +844,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
assertFalse(actual.wasReadFromDiff());
}
- static class MockPublishAction extends PublishClusterStateAction {
+ public static class MockPublishAction extends PublishClusterStateAction {
AtomicBoolean timeoutOnSend = new AtomicBoolean();
AtomicBoolean errorOnSend = new AtomicBoolean();
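Note on the createMockNode refactor above: the two-argument calls are collapsed into a one-argument form, while callers that need settings now pass the full three-argument variant explicitly. A minimal sketch of the convenience overload this implies (hypothetical signature, inferred only from the call sites; the third parameter appears to be the optional listener some tests pass as a lambda):

    // Hypothetical helper, assuming the three-argument variant exists as used above:
    // default to empty settings and no listener.
    private MockNode createMockNode(String name) throws Exception {
        return createMockNode(name, Settings.EMPTY, null);
    }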
diff --git a/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
index 8d7a9bdca0..d9371df09a 100644
--- a/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
+++ b/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
@@ -37,7 +37,9 @@ public class AliasedIndexDocumentActionsIT extends DocumentActionsIT {
// ignore
}
logger.info("--> creating index test");
- client().admin().indices().create(createIndexRequest("test1").alias(new Alias("test"))).actionGet();
+ client().admin().indices().create(createIndexRequest("test1")
+ .mapping("type1", "name", "type=keyword,store=true")
+ .alias(new Alias("test"))).actionGet();
}
@Override
diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java
index abc07da0b3..d198529f8d 100644
--- a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java
+++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import java.io.IOException;
@@ -51,7 +52,7 @@ import static org.hamcrest.Matchers.nullValue;
*/
public class DocumentActionsIT extends ESIntegTestCase {
protected void createIndex() {
- createIndex(getConcreteIndexName());
+ ElasticsearchAssertions.assertAcked(prepareCreate(getConcreteIndexName()).addMapping("type1", "name", "type=keyword,store=true"));
}
protected String getConcreteIndexName() {
@@ -103,7 +104,7 @@ public class DocumentActionsIT extends ESIntegTestCase {
logger.info("Get [type1/1] with script");
for (int i = 0; i < 5; i++) {
- getResult = client().prepareGet("test", "type1", "1").setFields("name").execute().actionGet();
+ getResult = client().prepareGet("test", "type1", "1").setStoredFields("name").execute().actionGet();
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
assertThat(getResult.isExists(), equalTo(true));
assertThat(getResult.getSourceAsBytes(), nullValue());
diff --git a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java
index 765cee3b6e..0ba97bee89 100644
--- a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java
+++ b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java
@@ -136,7 +136,7 @@ public class ShardInfoIT extends ESIntegTestCase {
assertThat(state.routingTable().index("idx").shard(shardId).activeShards().size(), equalTo(copyCount));
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("idx")
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
assertThat(healthResponse.isTimedOut(), equalTo(false));
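The hunk above is part of a rename that recurs throughout this merge: waiting for relocations in the cluster-health API is now expressed as a boolean flag rather than an expected shard count. A minimal sketch, assuming an ESIntegTestCase context where client() is available:

    // 5.x style: wait until no shards are relocating
    ClusterHealthResponse health = client().admin().cluster().prepareHealth("idx")
        .setWaitForNoRelocatingShards(true)   // previously .setWaitForRelocatingShards(0)
        .get();
    assertThat(health.isTimedOut(), equalTo(false));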
diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
index e5c1c53dad..9c11ae6b23 100644
--- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
+++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
@@ -396,49 +396,6 @@ public class NodeEnvironmentTests extends ESTestCase {
env.close();
}
- public void testWhetherClusterFolderShouldBeUsed() throws Exception {
- Path tempNoCluster = createTempDir();
- Path tempDataPath = tempNoCluster.toAbsolutePath();
-
- Path tempPath = tempNoCluster.resolve("foo"); // "foo" is the cluster name
- Path tempClusterPath = tempPath.toAbsolutePath();
-
- assertFalse("non-existent directory should not be used", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
- Settings settings = Settings.builder()
- .put("cluster.name", "foo")
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
- .put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
- try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
- Path nodeDataPath = env.nodeDataPaths()[0];
- assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
- }
- IOUtils.rm(tempNoCluster);
-
- Files.createDirectories(tempPath);
- assertFalse("empty directory should not be read from", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
- settings = Settings.builder()
- .put("cluster.name", "foo")
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
- .put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
- try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
- Path nodeDataPath = env.nodeDataPaths()[0];
- assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
- }
- IOUtils.rm(tempNoCluster);
-
- // Create a directory for the cluster name
- Files.createDirectories(tempPath.resolve(NodeEnvironment.NODES_FOLDER));
- assertTrue("there is data in the directory", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
- settings = Settings.builder()
- .put("cluster.name", "foo")
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
- .put(Environment.PATH_DATA_SETTING.getKey(), tempClusterPath.toString()).build();
- try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
- Path nodeDataPath = env.nodeDataPaths()[0];
- assertEquals(nodeDataPath, tempClusterPath.resolve("nodes").resolve("0"));
- }
- }
-
public void testPersistentNodeId() throws IOException {
String[] paths = tmpPaths();
NodeEnvironment env = newNodeEnvironment(paths, Settings.builder()
diff --git a/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java b/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java
index 7a64abc52a..528b03bc83 100644
--- a/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java
+++ b/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java
@@ -114,7 +114,9 @@ public class ExplainActionIT extends ESIntegTestCase {
}
public void testExplainWithFields() throws Exception {
- assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "obj1.field1", "type=keyword,store=true", "obj1.field2", "type=keyword,store=true")
+ .addAlias(new Alias("alias")));
ensureGreen("test");
client().prepareIndex("test", "test", "1")
@@ -129,7 +131,7 @@ public class ExplainActionIT extends ESIntegTestCase {
refresh();
ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1")
.setQuery(QueryBuilders.matchAllQuery())
- .setFields("obj1.field1").get();
+ .setStoredFields("obj1.field1").get();
assertNotNull(response);
assertTrue(response.isMatch());
assertNotNull(response.getExplanation());
@@ -146,7 +148,7 @@ public class ExplainActionIT extends ESIntegTestCase {
refresh();
response = client().prepareExplain(indexOrAlias(), "test", "1")
.setQuery(QueryBuilders.matchAllQuery())
- .setFields("obj1.field1").setFetchSource(true).get();
+ .setStoredFields("obj1.field1").setFetchSource(true).get();
assertNotNull(response);
assertTrue(response.isMatch());
assertNotNull(response.getExplanation());
@@ -162,7 +164,7 @@ public class ExplainActionIT extends ESIntegTestCase {
response = client().prepareExplain(indexOrAlias(), "test", "1")
.setQuery(QueryBuilders.matchAllQuery())
- .setFields("obj1.field1", "obj1.field2").get();
+ .setStoredFields("obj1.field1", "obj1.field2").get();
assertNotNull(response);
assertTrue(response.isMatch());
String v1 = (String) response.getGetResult().field("obj1.field1").getValue();
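The setFields-to-setStoredFields renames in this file (and in DocumentActionsIT above and GetActionIT below) go hand in hand with the store=true mappings being added: only fields explicitly mapped as stored can be fetched this way. A minimal sketch, assuming an ESIntegTestCase context:

    // map the field as stored, then fetch it by name on a get request
    assertAcked(prepareCreate("test")
        .addMapping("type1", "name", "type=keyword,store=true"));
    client().prepareIndex("test", "type1", "1").setSource("name", "value1").get();
    GetResponse doc = client().prepareGet("test", "type1", "1")
        .setStoredFields("name")              // previously .setFields("name")
        .get();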
diff --git a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
index 4a5f79a12a..8cd1b47941 100644
--- a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
+++ b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
@@ -23,13 +23,19 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.action.fieldstats.FieldStatsResponse;
import org.elasticsearch.action.fieldstats.IndexConstraint;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.util.ArrayList;
+import java.util.Date;
import java.util.List;
import java.util.Locale;
@@ -513,4 +519,52 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
assertThat(response.getAllFieldStats().get("_type").isSearchable(), equalTo(true));
assertThat(response.getAllFieldStats().get("_type").isAggregatable(), equalTo(true));
}
+
+ public void testSerialization() throws IOException {
+ for (int i = 0; i < 20; i++) {
+ assertSerialization(randomFieldStats());
+ }
+ }
+
+ /**
+ * Creates random field stats; the result does not guarantee that {@link FieldStats#maxValue} is greater than {@link FieldStats#minValue}.
+ **/
+ private FieldStats randomFieldStats() throws UnknownHostException {
+ int type = randomInt(5);
+ switch (type) {
+ case 0:
+ return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong(), randomBoolean(), randomBoolean(), randomLong(), randomLong());
+ case 1:
+ return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong(), randomBoolean(), randomBoolean(), randomDouble(), randomDouble());
+ case 2:
+ return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong(), randomBoolean(), randomBoolean(), Joda.forPattern("basicDate"),
+ new Date().getTime(), new Date().getTime());
+ case 3:
+ return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong(), randomBoolean(), randomBoolean(),
+ new BytesRef(randomAsciiOfLength(10)), new BytesRef(randomAsciiOfLength(20)));
+ case 4:
+ return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong(), randomBoolean(), randomBoolean(),
+ InetAddress.getByName("::1"), InetAddress.getByName("::1"));
+ case 5:
+ return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+ randomPositiveLong(), randomBoolean(), randomBoolean(),
+ InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.4"));
+ default:
+ throw new IllegalArgumentException("Invalid type");
+ }
+ }
+
+ private void assertSerialization(FieldStats stats) throws IOException {
+ BytesStreamOutput output = new BytesStreamOutput();
+ stats.writeTo(output);
+ output.flush();
+ FieldStats deserializedStats = FieldStats.readFrom(output.bytes().streamInput());
+ assertThat(stats, equalTo(deserializedStats));
+ assertThat(stats.hashCode(), equalTo(deserializedStats.hashCode()));
+ }
}
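The new testSerialization follows the usual round-trip pattern for wire serialization: write into an in-memory stream, read back, and compare both equals and hashCode. Reduced to its core (a sketch, assuming a FieldStats instance named stats):

    BytesStreamOutput out = new BytesStreamOutput();
    stats.writeTo(out);
    FieldStats deserialized = FieldStats.readFrom(out.bytes().streamInput());
    assertThat(stats, equalTo(deserialized));
    assertThat(stats.hashCode(), equalTo(deserialized.hashCode()));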
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
index 7ea916f4a5..a998b56f64 100644
--- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
+++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -33,12 +34,11 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.mapper.MapperParsingException;
@@ -56,7 +56,6 @@ import java.util.List;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
-import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
@@ -65,7 +64,7 @@ import static org.hamcrest.Matchers.nullValue;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class GatewayIndexStateIT extends ESIntegTestCase {
- private final ESLogger logger = Loggers.getLogger(GatewayIndexStateIT.class);
+ private final Logger logger = Loggers.getLogger(GatewayIndexStateIT.class);
public void testMappingMetaDataParsed() throws Exception {
logger.info("--> starting 1 nodes");
@@ -409,7 +408,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
.health(Requests.clusterHealthRequest()
.waitForGreenStatus()
.waitForEvents(Priority.LANGUID)
- .waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ .waitForNoRelocatingShards(true).waitForNodes("2")).actionGet();
}
ClusterState state = client().admin().cluster().prepareState().get().getState();
IndexMetaData metaData = state.getMetaData().index("test");
@@ -471,7 +470,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
.health(Requests.clusterHealthRequest()
.waitForGreenStatus()
.waitForEvents(Priority.LANGUID)
- .waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ .waitForNoRelocatingShards(true).waitForNodes("2")).actionGet();
}
ClusterState state = client().admin().cluster().prepareState().get().getState();
IndexMetaData metaData = state.getMetaData().index("test");
@@ -508,7 +507,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
.health(Requests.clusterHealthRequest()
.waitForGreenStatus()
.waitForEvents(Priority.LANGUID)
- .waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ .waitForNoRelocatingShards(true).waitForNodes("2")).actionGet();
}
ClusterState state = client().admin().cluster().prepareState().get().getState();
MetaData metaData = state.getMetaData();
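This file also picks up the logging migration that runs through the whole merge: org.elasticsearch.common.logging.ESLogger is replaced by the log4j 2 Logger, still obtained through Elasticsearch's Loggers factory. The resulting idiom, as a sketch:

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;

    // field declaration inside the test class
    private final Logger logger = Loggers.getLogger(GatewayIndexStateIT.class);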
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
index a3bb21a64b..2a37a7f0a6 100644
--- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
@@ -33,7 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllo
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.plugins.MetaDataUpgrader;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.test.TestCustomMetaData;
import org.junit.Before;
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java
index 4cf505d839..0f0e69b264 100644
--- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
@@ -34,7 +35,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -193,7 +193,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
}
}
- public static void corruptFile(Path file, ESLogger logger) throws IOException {
+ public static void corruptFile(Path file, Logger logger) throws IOException {
Path fileToCorrupt = file;
try (final SimpleFSDirectory dir = new SimpleFSDirectory(fileToCorrupt.getParent())) {
long checksumBeforeCorruption;
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java
index 795046ba10..3c2917f38e 100644
--- a/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java
@@ -73,7 +73,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
logger.debug("relocating index...");
client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2)).get();
- client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get();
+ client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).get();
ensureGreen();
assertIndexDirectoryDeleted(node1, resolveIndex);
assertIndexInMetaState(node2, index);
diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
index aeb4ff7b69..0fd89ec889 100644
--- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
@@ -23,18 +23,20 @@ import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
@@ -48,14 +50,17 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardStateMetaData;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
-import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import static org.elasticsearch.cluster.routing.UnassignedInfo.Reason.CLUSTER_RECOVERED;
+import static org.elasticsearch.cluster.routing.UnassignedInfo.Reason.INDEX_CREATED;
+import static org.elasticsearch.cluster.routing.UnassignedInfo.Reason.INDEX_REOPENED;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -77,11 +82,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testNoProcessPrimaryNotAllocatedBefore() {
final RoutingAllocation allocation;
- if (randomBoolean()) {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomBoolean(), Version.CURRENT);
- } else {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), true, Version.V_2_1_0);
- }
+ // with an old version we can't know whether a shard was allocated before or not
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(),
+ randomFrom(INDEX_CREATED, CLUSTER_RECOVERED, INDEX_REOPENED), Version.CURRENT);
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(false));
assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
@@ -95,9 +98,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testNoAsyncFetchData() {
final RoutingAllocation allocation;
if (randomBoolean()) {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId");
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.CURRENT, "allocId");
} else {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0);
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_0);
}
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
@@ -113,9 +116,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testNoAllocationFound() {
final RoutingAllocation allocation;
if (randomBoolean()) {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId");
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.CURRENT, "allocId");
} else {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0);
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_0);
}
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, null, randomBoolean());
testAllocator.allocateUnassigned(allocation);
@@ -129,7 +132,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* Tests that when the node returns data with a shard allocation id that does not match the active allocation ids, the shard is moved to the ignored unassigned list.
*/
public void testNoMatchingAllocationIdFound() {
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "id2");
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.CURRENT, "id2");
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "id1", randomBoolean());
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
@@ -143,7 +146,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* This is the case when we have old shards from pre-3.0 days.
*/
public void testNoActiveAllocationIds() {
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1);
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_1);
testAllocator.addData(node1, 1, null, randomBoolean());
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
@@ -159,10 +162,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testStoreException() {
final RoutingAllocation allocation;
if (randomBoolean()) {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,
+ randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean(), new CorruptIndexException("test", "test"));
} else {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1);
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_1);
testAllocator.addData(node1, 3, null, randomBoolean(), new CorruptIndexException("test", "test"));
}
testAllocator.allocateUnassigned(allocation);
@@ -179,10 +183,12 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
final RoutingAllocation allocation;
boolean useAllocationIds = randomBoolean();
if (useAllocationIds) {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED),
+ randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
} else {
- allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_2_0);
+ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED),
+ Version.V_2_2_0);
testAllocator.addData(node1, 3, null, randomBoolean());
}
testAllocator.allocateUnassigned(allocation);
@@ -204,12 +210,12 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
*/
public void testForceAllocatePrimary() {
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
- AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {
+ AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
// since the deciders return a NO decision for allocating a shard (due to the guaranteed NO decision from the second decider),
// the allocator will see if it can force assign the primary, where the decision will be YES
new TestAllocateDecision(randomBoolean() ? Decision.YES : Decision.NO), getNoDeciderThatAllowsForceAllocate()
- });
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, false, Version.CURRENT, "allocId1");
+ ));
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, Version.CURRENT, "allocId1");
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertTrue(allocation.routingNodes().unassigned().ignored().isEmpty());
@@ -225,14 +231,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testDontAllocateOnNoOrThrottleForceAllocationDecision() {
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
boolean forceDecisionNo = randomBoolean();
- AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {
+ AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
// since both deciders here return a NO decision for allocating a shard,
// the allocator will see if it can force assign the primary, where the decision will be either NO or THROTTLE,
// so the shard will remain un-initialized
new TestAllocateDecision(Decision.NO), forceDecisionNo ? getNoDeciderThatDeniesForceAllocate() :
getNoDeciderThatThrottlesForceAllocate()
- });
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, false, Version.CURRENT, "allocId1");
+ ));
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, Version.CURRENT, "allocId1");
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
@@ -248,15 +254,15 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
*/
public void testDontForceAllocateOnThrottleDecision() {
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
- AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {
+ AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
// since we have a NO decision for allocating a shard (because the second decider returns a NO decision),
// the allocator will see if it can force assign the primary, and in this case,
// the TestAllocateDecision's decision for force allocating is to THROTTLE (using
// the default behavior) so despite the other decider's decision to return YES for
// force allocating the shard, we still THROTTLE due to the decision from TestAllocateDecision
new TestAllocateDecision(Decision.THROTTLE), getNoDeciderThatAllowsForceAllocate()
- });
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, false, Version.CURRENT, "allocId1");
+ ));
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, Version.CURRENT, "allocId1");
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
@@ -271,7 +277,8 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testPreferAllocatingPreviousPrimary() {
String primaryAllocId = UUIDs.randomBase64UUID();
String replicaAllocId = UUIDs.randomBase64UUID();
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), primaryAllocId, replicaAllocId);
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(),
+ randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED), randomFrom(Version.V_2_0_0, Version.CURRENT), primaryAllocId, replicaAllocId);
boolean node1HasPrimaryShard = randomBoolean();
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, node1HasPrimaryShard ? primaryAllocId : replicaAllocId, node1HasPrimaryShard);
testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, node1HasPrimaryShard ? replicaAllocId : primaryAllocId, !node1HasPrimaryShard);
@@ -291,10 +298,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testFoundAllocationButThrottlingDecider() {
final RoutingAllocation allocation;
if (randomBoolean()) {
- allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
+ allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), CLUSTER_RECOVERED,
+ randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
} else {
- allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, Version.V_2_2_0);
+ allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_2_0);
testAllocator.addData(node1, 3, null, randomBoolean());
}
testAllocator.allocateUnassigned(allocation);
@@ -311,10 +319,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testFoundAllocationButNoDecider() {
final RoutingAllocation allocation;
if (randomBoolean()) {
- allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
+ allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), CLUSTER_RECOVERED,
+ randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
} else {
- allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, Version.V_2_0_0);
+ allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_0_0);
testAllocator.addData(node1, 3, null, randomBoolean());
}
testAllocator.allocateUnassigned(allocation);
@@ -329,7 +338,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* Tests that the highest version node is chosen for allocation.
*/
public void testAllocateToTheHighestVersionOnLegacyIndex() {
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_0_0);
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_0_0);
testAllocator.addData(node1, 10, null, randomBoolean()).addData(node2, 12, null, randomBoolean());
testAllocator.allocateUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
@@ -346,7 +355,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* allocation mode would be chosen).
*/
public void testVersionBasedAllocationPrefersShardWithAllocationId() {
- RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_0_0);
+ RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_0_0);
testAllocator.addData(node1, 10, null, randomBoolean());
testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, "some allocId", randomBoolean());
testAllocator.addData(node3, 12, null, randomBoolean());
@@ -430,12 +439,12 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
Version version = hasActiveAllocation ? Version.CURRENT : Version.V_2_0_0;
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(version)).numberOfShards(1).numberOfReplicas(0)
- .putActiveAllocationIds(0, hasActiveAllocation ? Sets.newHashSet("allocId") : Collections.emptySet()))
+ .putInSyncAllocationIds(0, hasActiveAllocation ? Sets.newHashSet("allocId") : Collections.emptySet()))
.build();
final Snapshot snapshot = new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID()));
RoutingTable routingTable = RoutingTable.builder()
- .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(snapshot, version, shardId.getIndexName()))
+ .addAsRestore(metaData.index(shardId.getIndex()), new SnapshotRecoverySource(snapshot, version, shardId.getIndexName()))
.build();
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
@@ -514,11 +523,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(version)
.put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
.put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true))
- .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, hasActiveAllocation ? Sets.newHashSet("allocId") : Collections.emptySet()))
+ .numberOfShards(1).numberOfReplicas(0).putInSyncAllocationIds(0, hasActiveAllocation ? Sets.newHashSet("allocId") : Collections.emptySet()))
.build();
RoutingTable routingTable = RoutingTable.builder()
- .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID())), Version.CURRENT, shardId.getIndexName()))
+ .addAsRestore(metaData.index(shardId.getIndex()), new SnapshotRecoverySource(new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID())), Version.CURRENT, shardId.getIndexName()))
.build();
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
@@ -615,17 +624,27 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertClusterHealthStatus(allocation, ClusterHealthStatus.RED);
}
- private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, boolean asNew, Version version,
+ private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders,
+ UnassignedInfo.Reason reason, Version version,
String... activeAllocationIds) {
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(version))
- .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(shardId.id(), Sets.newHashSet(activeAllocationIds)))
+ .numberOfShards(1).numberOfReplicas(0).putInSyncAllocationIds(shardId.id(), Sets.newHashSet(activeAllocationIds)))
.build();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
- if (asNew) {
- routingTableBuilder.addAsNew(metaData.index(shardId.getIndex()));
- } else {
- routingTableBuilder.addAsRecovery(metaData.index(shardId.getIndex()));
+ switch (reason) {
+ case INDEX_CREATED:
+ routingTableBuilder.addAsNew(metaData.index(shardId.getIndex()));
+ break;
+ case CLUSTER_RECOVERED:
+ routingTableBuilder.addAsRecovery(metaData.index(shardId.getIndex()));
+ break;
+ case INDEX_REOPENED:
+ routingTableBuilder.addAsFromCloseToOpen(metaData.index(shardId.getIndex()));
+ break;
+ default:
+ throw new IllegalArgumentException("unsupported unassigned reason: " + reason);
}
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
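Two independent API changes meet in this file: AllocationDeciders now takes a List instead of an array, and the old asNew boolean is replaced by an explicit UnassignedInfo.Reason that selects how the routing table is built. A minimal sketch of the list-based construction, using deciders that appear in the hunks above (TestAllocateDecision is test-local):

    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
        new TestAllocateDecision(Decision.YES),
        new SameShardAllocationDecider(Settings.EMPTY)));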
diff --git a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java
index 6c441a7c87..3de96448a4 100644
--- a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java
@@ -41,8 +41,10 @@ public class PriorityComparatorTests extends ESTestCase {
public void testPreferNewIndices() {
RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards(mock(RoutingNodes.class));
- List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null,
- randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null,
+ List<ShardRouting> shardRoutings = Arrays.asList(
+ TestShardRouting.newShardRouting("oldest", 0, null, null,
+ randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")),
+ TestShardRouting.newShardRouting("newest", 0, null, null,
randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")));
Collections.shuffle(shardRoutings, random());
for (ShardRouting routing : shardRoutings) {
@@ -71,8 +73,10 @@ public class PriorityComparatorTests extends ESTestCase {
public void testPreferPriorityIndices() {
RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards(mock(RoutingNodes.class));
- List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null,
- randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null,
+ List<ShardRouting> shardRoutings = Arrays.asList(
+ TestShardRouting.newShardRouting("oldest", 0, null, null,
+ randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")),
+ TestShardRouting.newShardRouting("newest", 0, null, null,
randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")));
Collections.shuffle(shardRoutings, random());
for (ShardRouting routing : shardRoutings) {
@@ -116,7 +120,7 @@ public class PriorityComparatorTests extends ESTestCase {
int numShards = randomIntBetween(10, 100);
for (int i = 0; i < numShards; i++) {
IndexMeta indexMeta = randomFrom(indices);
- shards.add(TestShardRouting.newShardRouting(indexMeta.name, randomIntBetween(1, 5), null, null, null,
+ shards.add(TestShardRouting.newShardRouting(indexMeta.name, randomIntBetween(1, 5), null, null,
randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")));
}
shards.sort(new PriorityComparator() {
diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java
index a1d16bfd88..c820bccae5 100644
--- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java
+++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java
@@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java
index 824a6bbaf3..1573e55284 100644
--- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java
+++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java
@@ -417,7 +417,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("Running Cluster Health");
ensureGreen();
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get(); // just wait for merges
- client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get();
+ client().admin().indices().prepareFlush().setForce(true).get();
boolean useSyncIds = randomBoolean();
if (useSyncIds == false) {
diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
index 2570df3a56..188f10c588 100644
--- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
@@ -29,6 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
@@ -49,9 +50,10 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
-import org.elasticsearch.test.ESAllocationTestCase;
+import org.elasticsearch.cluster.ESAllocationTestCase;
import org.junit.Before;
+import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
@@ -209,7 +211,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
*/
public void testThrottleWhenAllocatingToMatchingNode() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(new AllocationDeciders(Settings.EMPTY,
- new AllocationDecider[]{new TestAllocateDecision(Decision.YES), new SameShardAllocationDecider(Settings.EMPTY),
+ Arrays.asList(new TestAllocateDecision(Decision.YES), new SameShardAllocationDecider(Settings.EMPTY),
new AllocationDecider(Settings.EMPTY) {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
@@ -218,7 +220,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
}
return Decision.YES;
}
- }}));
+ })));
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
testAllocator.allocateUnassigned(allocation);
@@ -287,7 +289,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT).put(settings))
.numberOfShards(1).numberOfReplicas(1)
- .putActiveAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())))
+ .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())))
.build();
// mark shard as delayed if reason is NODE_LEFT
boolean delayed = reason == UnassignedInfo.Reason.NODE_LEFT &&
@@ -297,9 +299,11 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.add(IndexRoutingTable.builder(shardId.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shardId)
.addShard(primaryShard)
- .addShard(ShardRouting.newUnassigned(shardId, null, false,
+ .addShard(ShardRouting.newUnassigned(shardId, false,
+ RecoverySource.PeerRecoverySource.INSTANCE,
new UnassignedInfo(reason, null, null, failedAllocations, System.nanoTime(),
- System.currentTimeMillis(), delayed, UnassignedInfo.AllocationStatus.NO_ATTEMPT)))
+ System.currentTimeMillis(), delayed, UnassignedInfo.AllocationStatus.NO_ATTEMPT)
+ ))
.build())
)
.build();
@@ -315,13 +319,13 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT))
.numberOfShards(1).numberOfReplicas(1)
- .putActiveAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())))
+ .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())))
.build();
RoutingTable routingTable = RoutingTable.builder()
.add(IndexRoutingTable.builder(shardId.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shardId)
.addShard(primaryShard)
- .addShard(TestShardRouting.newShardRouting(shardId, node2.getId(), null, null, false, ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
+ .addShard(TestShardRouting.newShardRouting(shardId, node2.getId(), null, false, ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
.build())
)
.build();
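The RestoreSource-to-RecoverySource change surfaces here as well: an unassigned replica now carries an explicit recovery source instead of a nullable restore source, and TestShardRouting.newShardRouting drops its now-redundant null parameter. A minimal sketch of the new construction (assuming a ShardId named shardId):

    ShardRouting replica = ShardRouting.newUnassigned(shardId, false,
        RecoverySource.PeerRecoverySource.INSTANCE,
        new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null));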
diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java
index a6f4e6db23..81be3057b0 100644
--- a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java
+++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java
@@ -19,11 +19,11 @@
package org.elasticsearch.gateway;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.indices.recovery.RecoveryState;
@@ -56,7 +56,7 @@ public class ReusePeerRecoverySharedTest {
* should this use synced flush? can't use synced flush in the bwc
* tests
*/
- public static void testCase(Settings indexSettings, Runnable restartCluster, ESLogger logger, boolean useSyncIds) {
+ public static void testCase(Settings indexSettings, Runnable restartCluster, Logger logger, boolean useSyncIds) {
/*
* prevent any rebalance actions during the peer recovery if we run into
* a relocation the reuse count will be 0 and this fails the test. We
@@ -80,7 +80,7 @@ public class ReusePeerRecoverySharedTest {
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get();
// just wait for merges
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get();
- client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get();
+ client().admin().indices().prepareFlush().setForce(true).get();
if (useSyncIds == false) {
logger.info("--> disabling allocation while the cluster is shut down");
diff --git a/core/src/test/java/org/elasticsearch/get/GetActionIT.java b/core/src/test/java/org/elasticsearch/get/GetActionIT.java
index 9d5e279c8f..434536ac8d 100644
--- a/core/src/test/java/org/elasticsearch/get/GetActionIT.java
+++ b/core/src/test/java/org/elasticsearch/get/GetActionIT.java
@@ -61,6 +61,7 @@ public class GetActionIT extends ESIntegTestCase {
public void testSimpleGet() {
assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=keyword,store=true", "field2", "type=keyword,store=true")
.setSettings(Settings.builder().put("index.refresh_interval", -1))
.addAlias(new Alias("alias")));
ensureGreen();
@@ -71,6 +72,10 @@ public class GetActionIT extends ESIntegTestCase {
logger.info("--> index doc 1");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+ logger.info("--> non realtime get 1");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(false));
+
logger.info("--> realtime get 1");
response = client().prepareGet(indexOrAlias(), "type1", "1").get();
assertThat(response.isExists(), equalTo(true));
@@ -79,7 +84,7 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
logger.info("--> realtime get 1 (no source, implicit)");
- response = client().prepareGet(indexOrAlias(), "type1", "1").setFields(Strings.EMPTY_ARRAY).get();
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields(Strings.EMPTY_ARRAY).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getIndex(), equalTo("test"));
Set<String> fields = new HashSet<>(response.getFields().keySet());
@@ -103,20 +108,17 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
- logger.info("--> non realtime get 1");
- response = client().prepareGet(indexOrAlias(), "type1", "1").setRealtime(false).get();
- assertThat(response.isExists(), equalTo(false));
-
- logger.info("--> realtime fetch of field (requires fetching parsing source)");
- response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").get();
+ logger.info("--> realtime fetch of field");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields("field1").get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getSourceAsBytes(), nullValue());
assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
assertThat(response.getField("field2"), nullValue());
- logger.info("--> realtime fetch of field & source (requires fetching parsing source)");
- response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").setFetchSource("field1", null).get();
+ logger.info("--> realtime fetch of field & source");
+ response = client().prepareGet(indexOrAlias(), "type1", "1")
+ .setStoredFields("field1").setFetchSource("field1", null).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getSourceAsMap(), hasKey("field1"));
@@ -142,7 +144,7 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
logger.info("--> realtime fetch of field (loaded from index)");
- response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").get();
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields("field1").get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getSourceAsBytes(), nullValue());
@@ -150,7 +152,8 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.getField("field2"), nullValue());
logger.info("--> realtime fetch of field & source (loaded from index)");
- response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").setFetchSource(true).get();
+ response = client().prepareGet(indexOrAlias(), "type1", "1")
+ .setStoredFields("field1").setFetchSource(true).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getSourceAsBytes(), not(nullValue()));
@@ -189,6 +192,7 @@ public class GetActionIT extends ESIntegTestCase {
public void testSimpleMultiGet() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .addMapping("type1", "field", "type=keyword,store=true")
.setSettings(Settings.builder().put("index.refresh_interval", -1)));
ensureGreen();
@@ -230,8 +234,8 @@ public class GetActionIT extends ESIntegTestCase {
// multi get with specific field
response = client().prepareMultiGet()
- .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").fields("field"))
- .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "3").fields("field"))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").storedFields("field"))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "3").storedFields("field"))
.get();
assertThat(response.getResponses().length, equalTo(2));
@@ -262,12 +266,12 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.isExists(), equalTo(false));
client().prepareIndex("test", "type1", "1")
- .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject()).get();
+ .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get();
client().prepareIndex("test", "type2", "1")
- .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject()).get();
+ .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get();
- response = client().prepareGet("test", "type1", "1").setFields("field").get();
+ response = client().prepareGet("test", "type1", "1").setStoredFields("field").get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getType(), equalTo("type1"));
@@ -279,7 +283,7 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
- response = client().prepareGet("test", "type2", "1").setFields("field").get();
+ response = client().prepareGet("test", "type2", "1").setStoredFields("field").get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getType(), equalTo("type2"));
assertThat(response.getId(), equalTo("1"));
@@ -292,7 +296,7 @@ public class GetActionIT extends ESIntegTestCase {
// Now test values being fetched from stored fields.
refresh();
- response = client().prepareGet("test", "type1", "1").setFields("field").get();
+ response = client().prepareGet("test", "type1", "1").setStoredFields("field").get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
fields = new HashSet<>(response.getFields().keySet());
@@ -302,7 +306,7 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
- response = client().prepareGet("test", "type2", "1").setFields("field").get();
+ response = client().prepareGet("test", "type2", "1").setStoredFields("field").get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
fields = new HashSet<>(response.getFields().keySet());
@@ -530,7 +534,7 @@ public class GetActionIT extends ESIntegTestCase {
public void testGetFieldsMetaData() throws Exception {
assertAcked(prepareCreate("test")
.addMapping("parent")
- .addMapping("my-type1", "_parent", "type=parent")
+ .addMapping("my-type1", "_parent", "type=parent", "field1", "type=keyword,store=true")
.addAlias(new Alias("alias"))
.setSettings(Settings.builder().put("index.refresh_interval", -1)));
@@ -544,7 +548,7 @@ public class GetActionIT extends ESIntegTestCase {
GetResponse getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1")
.setRouting("1")
- .setFields("field1")
+ .setStoredFields("field1")
.get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false));
@@ -557,7 +561,7 @@ public class GetActionIT extends ESIntegTestCase {
flush();
getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1")
- .setFields("field1")
+ .setStoredFields("field1")
.setRouting("1")
.get();
assertThat(getResponse.isExists(), equalTo(true));
@@ -582,21 +586,18 @@ public class GetActionIT extends ESIntegTestCase {
.setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject())
.get();
- try {
- client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get();
- fail();
- } catch (IllegalArgumentException e) {
- //all well
- }
+
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
+ () -> client().prepareGet(indexOrAlias(), "my-type1", "1").setStoredFields("field1").get());
+ assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field"));
flush();
- try {
- client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get();
- fail();
- } catch (IllegalArgumentException e) {
- //all well
- }
+ exc = expectThrows(IllegalArgumentException.class,
+ () -> client().prepareGet(indexOrAlias(), "my-type1", "1").setStoredFields("field1").get());
+ assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field"));
}
public void testGetFieldsComplexField() throws Exception {
@@ -643,14 +644,14 @@ public class GetActionIT extends ESIntegTestCase {
logger.info("checking real time retrieval");
String field = "field1.field2.field3.field4";
- GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+ GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1").setStoredFields(field).get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
- getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+ getResponse = client().prepareGet("my-index", "my-type2", "1").setStoredFields(field).get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
@@ -675,14 +676,14 @@ public class GetActionIT extends ESIntegTestCase {
logger.info("checking post-flush retrieval");
- getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+ getResponse = client().prepareGet("my-index", "my-type1", "1").setStoredFields(field).get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
- getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+ getResponse = client().prepareGet("my-index", "my-type2", "1").setStoredFields(field).get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
@@ -709,7 +710,7 @@ public class GetActionIT extends ESIntegTestCase {
index("test", "my-type1", "1", "some_field", "some text");
refresh();
- GetResponse getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("_all").get();
+ GetResponse getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1").setStoredFields("_all").get();
assertNotNull(getResponse.getField("_all").getValue());
assertThat(getResponse.getField("_all").getValue().toString(), equalTo("some text"));
}
@@ -827,12 +828,6 @@ public class GetActionIT extends ESIntegTestCase {
indexSingleDocumentWithStringFieldsGeneratedFromText(true, randomBoolean());
String[] fieldsList = {"_all"};
String[] alwaysNotStoredFieldsList = {"_field_names"};
- // before refresh - document is only in translog
- assertGetFieldsNull(indexOrAlias(), "doc", "1", fieldsList);
- assertGetFieldsException(indexOrAlias(), "doc", "1", fieldsList);
- assertGetFieldsNull(indexOrAlias(), "doc", "1", alwaysNotStoredFieldsList);
- refresh();
- //after refresh - document is in translog and also indexed
assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
assertGetFieldsNull(indexOrAlias(), "doc", "1", alwaysNotStoredFieldsList);
flush();
@@ -882,11 +877,6 @@ public class GetActionIT extends ESIntegTestCase {
public void testGeneratedNumberFieldsStored() throws IOException {
indexSingleDocumentWithNumericFieldsGeneratedFromText(true, randomBoolean());
String[] fieldsList = {"token_count", "text.token_count"};
- // before refresh - document is only in translog
- assertGetFieldsNull(indexOrAlias(), "doc", "1", fieldsList);
- assertGetFieldsException(indexOrAlias(), "doc", "1", fieldsList);
- refresh();
- //after refresh - document is in translog and also indexed
assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
flush();
//after flush - document is no longer in the translog - only indexed
@@ -939,36 +929,30 @@ public class GetActionIT extends ESIntegTestCase {
private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields, @Nullable String routing) {
for (String field : fields) {
- assertGetFieldWorks(index, type, docId, field, false, routing);
- assertGetFieldWorks(index, type, docId, field, true, routing);
+ assertGetFieldWorks(index, type, docId, field, routing);
}
}
- private void assertGetFieldWorks(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
- GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
+ private void assertGetFieldWorks(String index, String type, String docId, String field, @Nullable String routing) {
+ GetResponse response = getDocument(index, type, docId, field, routing);
assertThat(response.getId(), equalTo(docId));
assertTrue(response.isExists());
assertNotNull(response.getField(field));
- response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
+ response = multiGetDocument(index, type, docId, field, routing);
assertThat(response.getId(), equalTo(docId));
assertTrue(response.isExists());
assertNotNull(response.getField(field));
}
- protected void assertGetFieldsException(String index, String type, String docId, String[] fields) {
- for (String field : fields) {
- assertGetFieldException(index, type, docId, field);
- }
- }
-
private void assertGetFieldException(String index, String type, String docId, String field) {
try {
- client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(false).get();
+ client().prepareGet().setIndex(index).setType(type).setId(docId).setStoredFields(field).get();
fail();
} catch (ElasticsearchException e) {
assertTrue(e.getMessage().contains("You can only get this field after refresh() has been called."));
}
- MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).setIgnoreErrorsOnGeneratedFields(false).get();
+ MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).storedFields(field)).get();
assertNull(multiGetResponse.getResponses()[0].getResponse());
assertTrue(multiGetResponse.getResponses()[0].getFailure().getMessage().contains("You can only get this field after refresh() has been called."));
}
@@ -979,7 +963,7 @@ public class GetActionIT extends ESIntegTestCase {
protected void assertGetFieldsNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
for (String field : fields) {
- assertGetFieldNull(index, type, docId, field, true, routing);
+ assertGetFieldNull(index, type, docId, field, routing);
}
}
@@ -989,37 +973,37 @@ public class GetActionIT extends ESIntegTestCase {
protected void assertGetFieldsAlwaysNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
for (String field : fields) {
- assertGetFieldNull(index, type, docId, field, true, routing);
- assertGetFieldNull(index, type, docId, field, false, routing);
+ assertGetFieldNull(index, type, docId, field, routing);
}
}
- protected void assertGetFieldNull(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
+ protected void assertGetFieldNull(String index, String type, String docId, String field, @Nullable String routing) {
//for get
- GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
+ GetResponse response = getDocument(index, type, docId, field, routing);
assertTrue(response.isExists());
assertNull(response.getField(field));
assertThat(response.getId(), equalTo(docId));
//same for multi get
- response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
+ response = multiGetDocument(index, type, docId, field, routing);
assertNull(response.getField(field));
assertThat(response.getId(), equalTo(docId));
assertTrue(response.isExists());
}
- private GetResponse multiGetDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
- MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, type, docId).fields(field);
+ private GetResponse multiGetDocument(String index, String type, String docId, String field, @Nullable String routing) {
+ MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, type, docId).storedFields(field);
if (routing != null) {
getItem.routing(routing);
}
- MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
+ MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem);
MultiGetResponse multiGetResponse = multiGetRequestBuilder.get();
assertThat(multiGetResponse.getResponses().length, equalTo(1));
return multiGetResponse.getResponses()[0].getResponse();
}
- private GetResponse getDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
- GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
+ private GetResponse getDocument(String index, String type, String docId, String field, @Nullable String routing) {
+ GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setStoredFields(field);
if (routing != null) {
getRequestBuilder.setRouting(routing);
}
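
The hunks above converge on two patterns: GET can only fetch fields explicitly mapped with store=true, via the renamed setStoredFields(...) builder, and exception checks use expectThrows(...) instead of the try/fail idiom. A minimal sketch of the stored-fields pattern against the integration-test client used above; the index, type, and field names are illustrative:

    // field1 must be mapped as stored for setStoredFields("field1") to return it
    assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=keyword,store=true"));
    client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();

    GetResponse response = client().prepareGet("test", "type1", "1")
        .setStoredFields("field1")   // replaces the removed setFields("field1")
        .get();
    assertEquals("value1", response.getField("field1").getValue().toString());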
diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
index 22324e1ff2..afde263d73 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
@@ -43,6 +43,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
@@ -52,13 +54,13 @@ import static org.hamcrest.Matchers.nullValue;
public class IndexServiceTests extends ESSingleNodeTestCase {
public void testDetermineShadowEngineShouldBeUsed() {
Settings regularSettings = Settings.builder()
- .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
- .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
.build();
Settings shadowSettings = Settings.builder()
- .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
- .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
.build();
diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
index 7ad60895bf..143fdc9fc2 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
@@ -48,7 +48,7 @@ import org.elasticsearch.index.shard.ShadowIndexShard;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortOrder;
@@ -142,7 +142,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
for (int i = 0; i < numDocs; i++) {
client().prepareIndex("foo", "doc", ""+i).setSource("foo", "bar").get();
}
- assertNoFailures(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ assertNoFailures(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.builder()
@@ -184,7 +184,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
}
- @TestLogging("gateway:TRACE")
+ @TestLogging("org.elasticsearch.gateway:TRACE")
public void testIndexWithFewDocuments() throws Exception {
final Path dataPath = createTempDir();
Settings nodeSettings = nodeSettings(dataPath);
@@ -227,10 +227,10 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
// Check that we can get doc 1 and 2, because we are doing realtime
// gets and getting from the primary
- GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(true).setFields("foo").get();
- GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(true).setFields("foo").get();
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
+ GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
flushAndRefresh(IDX);
client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
@@ -238,10 +238,10 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
refresh();
// Check that we can get doc 1 and 2 without realtime
- gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).setFields("foo").get();
- gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).setFields("foo").get();
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get();
+ gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get();
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
logger.info("--> restarting all nodes");
if (randomBoolean()) {
@@ -283,12 +283,12 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
- GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
- GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+ GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
+ GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
assertTrue(gResp1.isExists());
assertTrue(gResp2.isExists());
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
// Node1 has the primary, now node2 has the replica
String node2 = internalCluster().startNode(nodeSettings);
@@ -304,21 +304,21 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
assertHitCount(resp, 2);
- gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
- gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+ gResp1 = client().prepareGet(IDX, "doc", "1").get();
+ gResp2 = client().prepareGet(IDX, "doc", "2").get();
assertTrue(gResp1.isExists());
assertTrue(gResp2.toString(), gResp2.isExists());
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
client().prepareIndex(IDX, "doc", "1").setSource("foo", "foobar").get();
client().prepareIndex(IDX, "doc", "2").setSource("foo", "foobar").get();
- gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
- gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+ gResp1 = client().prepareGet(IDX, "doc", "1").get();
+ gResp2 = client().prepareGet(IDX, "doc", "2").get();
assertTrue(gResp1.isExists());
assertTrue(gResp2.toString(), gResp2.isExists());
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("foobar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("foobar"));
+ assertThat(gResp1.getSource().get("foo"), equalTo("foobar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("foobar"));
}
public void testPrimaryRelocation() throws Exception {
@@ -340,12 +340,12 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
- GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
- GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+ GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
+ GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
assertTrue(gResp1.isExists());
assertTrue(gResp2.isExists());
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
// Node1 has the primary, now node2 has the replica
String node2 = internalCluster().startNode(nodeSettings);
@@ -363,21 +363,21 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
assertHitCount(resp, 2);
- gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
- gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+ gResp1 = client().prepareGet(IDX, "doc", "1").get();
+ gResp2 = client().prepareGet(IDX, "doc", "2").get();
assertTrue(gResp1.isExists());
assertTrue(gResp2.toString(), gResp2.isExists());
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
- gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").setFields("foo").get();
- gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").setFields("foo").get();
+ gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").get();
+ gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").get();
assertTrue(gResp1.isExists());
assertTrue(gResp2.isExists());
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
}
public void testPrimaryRelocationWithConcurrentIndexing() throws Exception {
@@ -495,7 +495,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
public void sendRequest(DiscoveryNode node, long requestId, String action,
TransportRequest request, TransportRequestOptions options)
throws IOException, TransportException {
- if (keepFailing.get() && action.equals(RecoveryTargetService.Actions.TRANSLOG_OPS)) {
+ if (keepFailing.get() && action.equals(PeerRecoveryTargetService.Actions.TRANSLOG_OPS)) {
logger.info("--> failing translog ops");
throw new ElasticsearchException("failing on purpose");
}
@@ -573,10 +573,10 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
flushAndRefresh(IDX);
- GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
- GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
- assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
- assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+ GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
+ GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
+ assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
+ assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
logger.info("--> performing query");
SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
@@ -594,6 +594,12 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
* Tests that shadow replicas can be "naturally" rebalanced and relocated
* around the cluster. By "naturally" I mean without using the reroute API
*/
+ // This test failed on CI when trying to assert that all the shard data had been deleted
+ // from the index path. The failure has not been reproduced locally. Even though the
+ // IndicesService deletes the index, and hence all of its shard data, the CI failure still
+ // showed some Lucene files in the data directory for that index. It is unclear why, so
+ // more logging is enabled here.
+ @TestLogging("org.elasticsearch.indices:TRACE,org.elasticsearch.env:TRACE")
public void testShadowReplicaNaturalRelocation() throws Exception {
Path dataPath = createTempDir();
Settings nodeSettings = nodeSettings(dataPath);
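
With foo no longer fetched as a stored field, these shadow-replica tests read the document _source instead. A minimal sketch of the pattern, with an illustrative index name:

    // realtime GET (the default) is served from the primary; values come from _source, not stored fields
    GetResponse gResp = client().prepareGet("idx", "doc", "1").get();
    assertTrue(gResp.isExists());
    assertEquals("bar", gResp.getSource().get("foo"));

    // the same read, explicitly bypassing the realtime path
    gResp = client().prepareGet("idx", "doc", "1").setRealtime(false).get();
    assertEquals("bar", gResp.getSource().get("foo"));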
diff --git a/core/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java b/core/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java
index 9caf9790c7..ff60a20622 100644
--- a/core/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java
@@ -20,13 +20,9 @@ package org.elasticsearch.index;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
@@ -85,9 +81,9 @@ public class MergePolicySettingsTests extends ESTestCase {
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build()));
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
- assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.mbFrac(), 0);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.mb() + 1, ByteSizeUnit.MB)).build()));
- assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.mb() + 1, ByteSizeUnit.MB).mbFrac(), 0.001);
+ assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
+ indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build()));
+ assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build()));
@@ -97,9 +93,9 @@ public class MergePolicySettingsTests extends ESTestCase {
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build()));
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1);
- assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.mbFrac(), 0.0001);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.bytes() + 1)).build()));
- assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.bytes() + 1).mbFrac(), 0.0001);
+ assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
+ indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build()));
+ assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getReclaimDeletesWeight(), MergePolicyConfig.DEFAULT_RECLAIM_DELETES_WEIGHT, 0);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING.getKey(), MergePolicyConfig.DEFAULT_RECLAIM_DELETES_WEIGHT + 1).build()));
@@ -111,10 +107,10 @@ public class MergePolicySettingsTests extends ESTestCase {
indexSettings.updateIndexMetaData(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
- assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.mb(), ByteSizeUnit.MB).mbFrac(), 0.00);
+ assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
- assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.bytes() + 1).mbFrac(), 0.0001);
+ assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getReclaimDeletesWeight(), MergePolicyConfig.DEFAULT_RECLAIM_DELETES_WEIGHT, 0);
assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
}
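
The only substantive change in this file is the ByteSizeValue accessor rename; the old-to-new mapping used in the hunks above is roughly:

    ByteSizeValue size = new ByteSizeValue(5, ByteSizeUnit.MB);
    long mb = size.getMb();           // was size.mb()
    double mbFrac = size.getMbFrac(); // was size.mbFrac()
    long bytes = size.getBytes();     // was size.bytes()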
diff --git a/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java b/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java
index 4c79ce1b49..2afe0b7fea 100644
--- a/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java
@@ -77,13 +77,6 @@ public class VersionTypeTests extends ESTestCase {
assertTrue(VersionType.EXTERNAL_GTE.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE)));
assertFalse(VersionType.EXTERNAL_GTE.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1)));
- assertTrue(VersionType.FORCE.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE)));
- assertFalse(VersionType.FORCE.validateVersionForWrites(Versions.MATCH_ANY));
- assertFalse(VersionType.FORCE.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0)));
- assertTrue(VersionType.FORCE.validateVersionForReads(Versions.MATCH_ANY));
- assertTrue(VersionType.FORCE.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE)));
- assertFalse(VersionType.FORCE.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1)));
-
assertTrue(VersionType.INTERNAL.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE)));
assertTrue(VersionType.INTERNAL.validateVersionForWrites(Versions.MATCH_ANY));
assertFalse(VersionType.INTERNAL.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0)));
@@ -153,36 +146,6 @@ public class VersionTypeTests extends ESTestCase {
}
- public void testForceVersionConflict() throws Exception {
- assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
-
- // MATCH_ANY must throw an exception in the case of force version, as the version must be set! it used as the new value
- try {
- VersionType.FORCE.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean());
- fail();
- } catch (IllegalStateException e) {
- //yes!!
- }
-
- // if we didn't find a version (but the index does support it), we always accept
- assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND, randomBoolean()));
- assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
-
- assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.NOT_FOUND));
- assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, 10));
- assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY));
-
-
- // and the standard behavior
- assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 10, randomBoolean()));
- assertFalse(VersionType.FORCE.isVersionConflictForWrites(9, 10, randomBoolean()));
- assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 9, randomBoolean()));
- assertFalse(VersionType.FORCE.isVersionConflictForReads(10, 10));
- assertFalse(VersionType.FORCE.isVersionConflictForReads(9, 10));
- assertFalse(VersionType.FORCE.isVersionConflictForReads(10, 9));
- assertFalse(VersionType.FORCE.isVersionConflictForReads(10, Versions.MATCH_ANY));
- }
-
public void testUpdateVersion() {
assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(1L));
assertThat(VersionType.INTERNAL.updateVersion(1, 1), equalTo(2L));
@@ -196,9 +159,6 @@ public class VersionTypeTests extends ESTestCase {
assertThat(VersionType.EXTERNAL_GTE.updateVersion(1, 10), equalTo(10L));
assertThat(VersionType.EXTERNAL_GTE.updateVersion(10, 10), equalTo(10L));
- assertThat(VersionType.FORCE.updateVersion(Versions.NOT_FOUND, 10), equalTo(10L));
- assertThat(VersionType.FORCE.updateVersion(11, 10), equalTo(10L));
-
// Old indexing code
// if (index.versionType() == VersionType.INTERNAL) { // internal version type
// updatedVersion = (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java
index 7b6d74a418..d68cbaa9d3 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@@ -30,11 +31,11 @@ import java.io.StringReader;
public class ASCIIFoldingTokenFilterFactoryTests extends ESTokenStreamTestCase {
public void testDefault() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_ascii_folding");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding");
String source = "Ansprüche";
String[] expected = new String[]{"Anspruche"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -43,12 +44,12 @@ public class ASCIIFoldingTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testPreserveOriginal() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
.put("index.analysis.filter.my_ascii_folding.preserve_original", true)
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_ascii_folding");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding");
String source = "Ansprüche";
String[] expected = new String[]{"Anspruche", "Ansprüche"};
Tokenizer tokenizer = new WhitespaceTokenizer();
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisServiceTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java
index 52fcdd4bb2..4a5a0b9567 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java
@@ -49,12 +49,25 @@ import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
-public class AnalysisServiceTests extends ESTestCase {
+public class AnalysisRegistryTests extends ESTestCase {
+
+ private AnalysisRegistry registry;
private static AnalyzerProvider<?> analyzerProvider(final String name) {
return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDEX, new EnglishAnalyzer());
}
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ Settings settings = Settings
+ .builder()
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ registry = new AnalysisRegistry(new Environment(settings),
+ emptyMap(), emptyMap(), emptyMap(), emptyMap());
+ }
+
public void testDefaultAnalyzers() throws IOException {
Version version = VersionUtils.randomVersion(random());
Settings settings = Settings
@@ -63,29 +76,30 @@ public class AnalysisServiceTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
- AnalysisService analysisService = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
- .build(idxSettings);
- assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
- assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
- assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
+ IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
+ .build(idxSettings);
+ assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
}
public void testOverrideDefaultAnalyzer() throws IOException {
Version version = VersionUtils.randomVersion(random());
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
- singletonMap("default", analyzerProvider("default")), emptyMap(), emptyMap(), emptyMap());
- assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
- assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
- assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
+ singletonMap("default", analyzerProvider("default"))
+ , emptyMap(), emptyMap(), emptyMap());
+ assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
}
public void testOverrideDefaultIndexAnalyzerIsUnsupported() {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha1, Version.CURRENT);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- AnalyzerProvider<?> defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer());
+ AnalyzerProvider<?> defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer());
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
- () -> new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
+ () -> registry.build(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default_index", defaultIndex), emptyMap(), emptyMap(), emptyMap()));
assertTrue(e.getMessage().contains("[index.analysis.analyzer.default_index] is not supported"));
}
@@ -94,21 +108,21 @@ public class AnalysisServiceTests extends ESTestCase {
Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(),
VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1));
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
+ IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default_index", analyzerProvider("default_index")), emptyMap(), emptyMap(), emptyMap());
- assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
- assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
- assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
}
public void testOverrideDefaultSearchAnalyzer() {
Version version = VersionUtils.randomVersion(random());
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
+ IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
singletonMap("default_search", analyzerProvider("default_search")), emptyMap(), emptyMap(), emptyMap());
- assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
- assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
- assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
}
public void testBackCompatOverrideDefaultIndexAndSearchAnalyzer() {
@@ -118,11 +132,11 @@ public class AnalysisServiceTests extends ESTestCase {
Map<String, AnalyzerProvider<?>> analyzers = new HashMap<>();
analyzers.put("default_index", analyzerProvider("default_index"));
analyzers.put("default_search", analyzerProvider("default_search"));
- AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
+ IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
analyzers, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
- assertThat(analysisService.defaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
- assertThat(analysisService.defaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
- assertThat(analysisService.defaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
+ assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
}
public void testConfigureCamelCaseTokenFilter() throws IOException {
@@ -137,10 +151,10 @@ public class AnalysisServiceTests extends ESTestCase {
.putArray("index.analysis.analyzer.custom_analyzer_1.filter", "lowercase", "word_delimiter").build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
-
- AnalysisService analysisService = new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry()
+
+ IndexAnalyzers indexAnalyzers = new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry()
.build(idxSettings);
- try (NamedAnalyzer custom_analyser = analysisService.analyzer("custom_analyzer")) {
+ try (NamedAnalyzer custom_analyser = indexAnalyzers.get("custom_analyzer")) {
assertNotNull(custom_analyser);
TokenStream tokenStream = custom_analyser.tokenStream("foo", "J2SE j2ee");
tokenStream.reset();
@@ -154,7 +168,7 @@ public class AnalysisServiceTests extends ESTestCase {
assertEquals("j2ee", token.get(1));
}
- try (NamedAnalyzer custom_analyser = analysisService.analyzer("custom_analyzer_1")) {
+ try (NamedAnalyzer custom_analyser = indexAnalyzers.get("custom_analyzer_1")) {
assertNotNull(custom_analyser);
TokenStream tokenStream = custom_analyser.tokenStream("foo", "J2SE j2ee");
tokenStream.reset();
@@ -178,14 +192,14 @@ public class AnalysisServiceTests extends ESTestCase {
Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
- AnalysisService analysisService = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
+ IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
.build(idxSettings);
- AnalysisService otherAnalysisSergice = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(),
+ IndexAnalyzers otherIndexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(),
emptyMap()).build(idxSettings);
final int numIters = randomIntBetween(5, 20);
for (int i = 0; i < numIters; i++) {
PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(random(), PreBuiltAnalyzers.values());
- assertSame(analysisService.analyzer(preBuiltAnalyzers.name()), otherAnalysisSergice.analyzer(preBuiltAnalyzers.name()));
+ assertSame(indexAnalyzers.get(preBuiltAnalyzers.name()), otherIndexAnalyzers.get(preBuiltAnalyzers.name()));
}
}
@@ -204,4 +218,15 @@ public class AnalysisServiceTests extends ESTestCase {
() -> new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap()).build(idxSettings));
assertThat(e.getMessage(), equalTo("analyzer [test_analyzer] must specify either an analyzer type, or a tokenizer"));
}
+
+ public void testCloseIndexAnalyzersMultipleTimes() throws IOException {
+ Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
+ Settings indexSettings = Settings.builder()
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+ IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap())
+ .build(idxSettings);
+ indexAnalyzers.close();
+ indexAnalyzers.close();
+ }
}
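
In place of the removed AnalysisService, analyzers are now looked up through an IndexAnalyzers built by the AnalysisRegistry. A minimal sketch distilled from the tests above; the analyzer name passed to get(...) is illustrative:

    Settings settings = Settings.builder()
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .build();
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
    IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings),
        emptyMap(), emptyMap(), emptyMap(), emptyMap()).build(idxSettings);

    assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
    NamedAnalyzer analyzer = indexAnalyzers.get("standard"); // name lookup replaces analysisService.analyzer(name)
    indexAnalyzers.close(); // closing twice is safe, per testCloseIndexAnalyzersMultipleTimes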
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java
index b4042164e2..4073bbdbbc 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java
@@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis;
-import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
index 40ec2b412f..a60c21c1a7 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import java.io.IOException;
@@ -34,21 +35,25 @@ import static java.util.Collections.emptyList;
public class AnalysisTestsHelper {
- public static AnalysisService createAnalysisServiceFromClassPath(Path baseDir, String resource) throws IOException {
+ public static ESTestCase.TestAnalysis createTestAnalysisFromClassPath(Path baseDir, String resource) throws IOException {
Settings settings = Settings.builder()
.loadFromStream(resource, AnalysisTestsHelper.class.getResourceAsStream(resource))
.put(Environment.PATH_HOME_SETTING.getKey(), baseDir.toString())
.build();
- return createAnalysisServiceFromSettings(settings);
+ return createTestAnalysisFromSettings(settings);
}
- public static AnalysisService createAnalysisServiceFromSettings(
+ public static ESTestCase.TestAnalysis createTestAnalysisFromSettings(
Settings settings) throws IOException {
if (settings.get(IndexMetaData.SETTING_VERSION_CREATED) == null) {
settings = Settings.builder().put(settings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
}
- IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
- return new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry().build(idxSettings);
+ IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings);
+ AnalysisRegistry analysisRegistry = new AnalysisModule(new Environment(settings), emptyList()).getAnalysisRegistry();
+ return new ESTestCase.TestAnalysis(analysisRegistry.build(indexSettings),
+ analysisRegistry.buildTokenFilterFactories(indexSettings),
+ analysisRegistry.buildTokenizerFactories(indexSettings),
+ analysisRegistry.buildCharFilterFactories(indexSettings));
}
}
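
The helper now hands tests an ESTestCase.TestAnalysis bundle instead of an AnalysisService, with factories exposed as plain maps keyed by name. A minimal usage sketch, mirroring the token-filter tests that follow:

    ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
        .build());
    // map lookup replaces analysisService.tokenFilter("my_ascii_folding")
    TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding");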
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
index d2e2d4cc6e..5ae2fbbb1c 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
@@ -21,6 +21,7 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@@ -30,8 +31,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
private static final String RESOURCE = "/org/elasticsearch/index/analysis/cjk_analysis.json";
public void testDefault() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_bigram");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_bigram");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
Tokenizer tokenizer = new StandardTokenizer();
@@ -40,8 +41,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testNoFlags() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_no_flags");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_no_flags");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
Tokenizer tokenizer = new StandardTokenizer();
@@ -50,8 +51,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testHanOnly() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_only");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_han_only");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"多", "く", "の", "学生", "が", "試験", "に", "落", "ち", "た" };
Tokenizer tokenizer = new StandardTokenizer();
@@ -60,8 +61,8 @@ public class CJKFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testHanUnigramOnly() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_unigram_only");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_han_unigram_only");
String source = "多くの学生が試験に落ちた。";
String[] expected = new String[]{"多", "く", "の", "学", "学生", "生", "が", "試", "試験", "験", "に", "落", "ち", "た" };
Tokenizer tokenizer = new StandardTokenizer();
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
index 3f2b1461ef..206dffd0fb 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
@@ -26,7 +26,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
-import static org.elasticsearch.test.ESTestCase.createAnalysisService;
+import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
/**
*/
@@ -41,8 +41,8 @@ public class CharFilterTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
- AnalysisService analysisService = createAnalysisService(idxSettings, settings);
- NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+ IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+ NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
@@ -58,9 +58,9 @@ public class CharFilterTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
- AnalysisService analysisService = createAnalysisService(idxSettings, settings);
- NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+ IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+ NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
@@ -80,8 +80,8 @@ public class CharFilterTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
- AnalysisService analysisService = createAnalysisService(idxSettings, settings);
- NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+ IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+ NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "faBBbBB aBbbbBf"), new String[]{"foo", "oof"});
}
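The same mechanical rewrite recurs in every test file this commit touches; a hedged before/after sketch of the pattern:

// Before (removed in this commit): name-based lookups went through AnalysisService.
//   AnalysisService analysisService = createAnalysisService(idxSettings, settings);
//   NamedAnalyzer analyzer = analysisService.analyzer("custom_with_char_filter");

// After: named analyzers hang off TestAnalysis.indexAnalyzers,
// and token filter factories off the TestAnalysis.tokenFilter map.
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer = indexAnalyzers.get("custom_with_char_filter");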
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
index 0c9010b2c9..ede4240455 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
@@ -61,9 +61,7 @@ public class CompoundAnalysisTests extends ESTestCase {
return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
}
}));
- AnalysisService analysisService = analysisModule.getAnalysisRegistry().build(idxSettings);
-
- TokenFilterFactory filterFactory = analysisService.tokenFilter("dict_dec");
+ TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec");
MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
}
@@ -85,9 +83,8 @@ public class CompoundAnalysisTests extends ESTestCase {
return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
}
}));
- AnalysisService analysisService = analysisModule.getAnalysisRegistry().build(idxSettings);
-
- Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+ IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
+ Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer();
AllEntries allEntries = new AllEntries();
allEntries.addText("field1", text, 1.0f);
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java
index ac41220702..c5e854879e 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java
@@ -20,7 +20,7 @@ package org.elasticsearch.index.analysis;
*/
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.CharArraySet;
import org.elasticsearch.test.ESTokenStreamTestCase;
public class FingerprintAnalyzerTests extends ESTokenStreamTestCase {
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
index 45e4dd24f9..2708387da1 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
@@ -36,8 +36,8 @@ public class HunspellTokenFilterFactoryTests extends ESTestCase {
.put("index.analysis.filter.en_US.locale", "en_US")
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+ TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("en_US");
assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
assertThat(hunspellTokenFilter.dedup(), is(true));
@@ -50,8 +50,8 @@ public class HunspellTokenFilterFactoryTests extends ESTestCase {
.put("index.analysis.filter.en_US.locale", "en_US")
.build();
- analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- tokenFilter = analysisService.tokenFilter("en_US");
+ analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ tokenFilter = analysis.tokenFilter.get("en_US");
assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
assertThat(hunspellTokenFilter.dedup(), is(false));
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
index 35148874e1..d5a6a590e7 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.junit.Assert;
@@ -35,8 +36,8 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
private static final String RESOURCE = "/org/elasticsearch/index/analysis/keep_analysis.json";
public void testLoadWithoutSettings() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep");
Assert.assertNull(tokenFilter);
}
@@ -48,7 +49,7 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]")
.build();
try {
- AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
Assert.fail("path and array are configured");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@@ -64,7 +65,7 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
.build();
try {
// test that our non-existent setup is picked up
- AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
fail("expected an exception due to non existent keep_words_path");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@@ -76,7 +77,7 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
.build();
try {
// test that specifying both keep_words and keep_words_path is rejected
- AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
fail("expected an exception indicating that you can't use [keep_words_path] with [keep_words] ");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@@ -86,8 +87,8 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testCaseInsensitiveMapping() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_keep_filter");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keep_filter");
assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
String source = "hello small world";
String[] expected = new String[]{"hello", "world"};
@@ -97,8 +98,8 @@ public class KeepFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testCaseSensitiveMapping() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_case_sensitive_keep_filter");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_case_sensitive_keep_filter");
assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
String source = "Hello small world";
String[] expected = new String[]{"Hello"};
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java
index 986b79fad2..48ce1139d8 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@@ -37,8 +38,8 @@ public class KeepTypesFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.keep_numbers.type", "keep_types")
.putArray("index.analysis.filter.keep_numbers.types", new String[] {"<NUM>", "<SOMETHINGELSE>"})
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep_numbers");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep_numbers");
assertThat(tokenFilter, instanceOf(KeepTypesFilterFactory.class));
String source = "Hello 123 world";
String[] expected = new String[]{"123"};
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
index bf17e5c7bf..f1d810505b 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@@ -34,9 +35,9 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_default.type", "limit")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
{
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_default");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_default");
String source = "the quick brown fox";
String[] expected = new String[] { "the" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -44,7 +45,7 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
{
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit");
String source = "the quick brown fox";
String[] expected = new String[] { "the" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -61,8 +62,8 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_1.consume_all_tokens", true)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_1");
String source = "the quick brown fox";
String[] expected = new String[] { "the", "quick", "brown" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -76,8 +77,8 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_1.consume_all_tokens", false)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_1");
String source = "the quick brown fox";
String[] expected = new String[] { "the", "quick", "brown" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -92,8 +93,8 @@ public class LimitTokenCountFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.limit_1.consume_all_tokens", true)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("limit_1");
String source = "the quick brown fox";
String[] expected = new String[] { "the", "quick", "brown", "fox" };
Tokenizer tokenizer = new WhitespaceTokenizer();
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/MinHashFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/MinHashFilterFactoryTests.java
new file mode 100644
index 0000000000..fc78afa7ab
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/MinHashFilterFactoryTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.ESTokenStreamTestCase;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+public class MinHashFilterFactoryTests extends ESTokenStreamTestCase {
+ public void testDefault() throws IOException {
+ int default_hash_count = 1;
+ int default_bucket_size = 512;
+ int default_hash_set_size = 1;
+ Settings settings = Settings.builder()
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("min_hash");
+ String source = "the quick brown fox";
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+
+ // with_rotation is true by default, and hash_set_size is 1, so even though the source doesn't
+ // have enough tokens to fill all the buckets, we still expect 512 tokens.
+ assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer),
+ default_hash_count * default_bucket_size * default_hash_set_size);
+ }
+
+ public void testSettings() throws IOException {
+ Settings settings = Settings.builder()
+ .put("index.analysis.filter.test_min_hash.type", "min_hash")
+ .put("index.analysis.filter.test_min_hash.hash_count", "1")
+ .put("index.analysis.filter.test_min_hash.bucket_count", "2")
+ .put("index.analysis.filter.test_min_hash.hash_set_size", "1")
+ .put("index.analysis.filter.test_min_hash.with_rotation", false)
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("test_min_hash");
+ String source = "sushi";
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+
+ // even though bucket_count is 2 and hash_set_size is 1, with_rotation is false,
+ // so we only expect a single token here.
+ assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer), 1);
+ }
+}
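The 512 in testDefault is just the product of the three MinHash defaults named at the top of that test; with rotation enabled the filter pads every bucket, so the stream length is fixed regardless of how short the input is:

// default_hash_count * default_bucket_size * default_hash_set_size
int expectedTokens = 1 * 512 * 1; // == 512 tokens for any non-empty input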
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
index caefb1039c..126bbe2ab9 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
@@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
-import static org.elasticsearch.test.ESTestCase.createAnalysisService;
+import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
import static org.hamcrest.Matchers.containsString;
public class PatternCaptureTokenFilterTests extends ESTokenStreamTestCase {
@@ -40,17 +40,16 @@ public class PatternCaptureTokenFilterTests extends ESTokenStreamTestCase {
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
- AnalysisService analysisService = createAnalysisService(idxSettings, settings);
-
- NamedAnalyzer analyzer1 = analysisService.analyzer("single");
+ IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+ NamedAnalyzer analyzer1 = indexAnalyzers.get("single");
assertTokenStreamContents(analyzer1.tokenStream("test", "foobarbaz"), new String[]{"foobarbaz","foobar","foo"});
- NamedAnalyzer analyzer2 = analysisService.analyzer("multi");
+ NamedAnalyzer analyzer2 = indexAnalyzers.get("multi");
assertTokenStreamContents(analyzer2.tokenStream("test", "abc123def"), new String[]{"abc123def","abc","123","def"});
- NamedAnalyzer analyzer3 = analysisService.analyzer("preserve");
+ NamedAnalyzer analyzer3 = indexAnalyzers.get("preserve");
assertTokenStreamContents(analyzer3.tokenStream("test", "foobarbaz"), new String[]{"foobar","foo"});
}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
index 186f6ac1cb..64663c3682 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
@@ -19,12 +19,14 @@
package org.elasticsearch.index.analysis;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@@ -37,8 +39,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
private static final String RESOURCE = "/org/elasticsearch/index/analysis/shingle_analysis.json";
public void testDefault() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle");
String source = "the quick brown fox";
String[] expected = new String[]{"the", "the quick", "quick", "quick brown", "brown", "brown fox", "fox"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -47,8 +49,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testInverseMapping() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle_inverse");
assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
String source = "the quick brown fox";
String[] expected = new String[]{"the_quick_brown", "quick_brown_fox"};
@@ -58,8 +60,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testInverseMappingNoShingles() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle_inverse");
assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
String source = "the quick";
String[] expected = new String[]{"the", "quick"};
@@ -69,8 +71,8 @@ public class ShingleTokenFilterFactoryTests extends ESTokenStreamTestCase {
}
public void testFillerToken() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_filler");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("shingle_filler");
String source = "simon the sorcerer";
String[] expected = new String[]{"simon FILLER", "simon FILLER sorcerer", "FILLER sorcerer"};
Tokenizer tokenizer = new WhitespaceTokenizer();
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java
index a414f41276..7c4818c63b 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java
@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.VersionUtils;
@@ -53,13 +54,14 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_english");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_english");
assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
TokenStream create = tokenFilter.create(tokenizer);
- NamedAnalyzer analyzer = analysisService.analyzer("my_english");
+ IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
+ NamedAnalyzer analyzer = indexAnalyzers.get("my_english");
assertThat(create, instanceOf(PorterStemFilter.class));
assertAnalyzesTo(analyzer, "consolingly", new String[]{"consolingli"});
}
@@ -80,13 +82,14 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_porter2");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_porter2");
assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
TokenStream create = tokenFilter.create(tokenizer);
- NamedAnalyzer analyzer = analysisService.analyzer("my_porter2");
+ IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
+ NamedAnalyzer analyzer = indexAnalyzers.get("my_porter2");
assertThat(create, instanceOf(SnowballFilter.class));
assertAnalyzesTo(analyzer, "possibly", new String[]{"possibl"});
}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
index 88c5fe692d..e166f4b7b9 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
@@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
-import static org.elasticsearch.test.ESTestCase.createAnalysisService;
+import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
public class StopAnalyzerTests extends ESTokenStreamTestCase {
public void testDefaultsCompoundAnalysis() throws Exception {
@@ -38,13 +38,12 @@ public class StopAnalyzerTests extends ESTokenStreamTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
- AnalysisService analysisService = createAnalysisService(idxSettings, settings);
-
- NamedAnalyzer analyzer1 = analysisService.analyzer("analyzer1");
+ IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+ NamedAnalyzer analyzer1 = indexAnalyzers.get("analyzer1");
assertTokenStreamContents(analyzer1.tokenStream("test", "to be or not to be"), new String[0]);
- NamedAnalyzer analyzer2 = analysisService.analyzer("analyzer2");
+ NamedAnalyzer analyzer2 = indexAnalyzers.get("analyzer2");
assertTokenStreamContents(analyzer2.tokenStream("test", "to be or not to be"), new String[0]);
}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
index 191b7ffcdf..7d91619075 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
@@ -19,15 +19,16 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
import org.apache.lucene.util.Version;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@@ -47,7 +48,7 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase {
builder.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString());
Settings settings = builder.build();
try {
- AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("enable_position_increments is not supported anymore"));
@@ -62,8 +63,8 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase {
// don't specify
}
builder.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString());
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(builder.build());
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_stop");
assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
@@ -77,8 +78,8 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.my_stop.remove_trailing", false)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_stop");
assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo an"));
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
index f94252caba..1a7903bcfa 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
@@ -30,11 +31,11 @@ import java.io.StringReader;
public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase {
public void testDefault() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -43,13 +44,13 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testCatenateWords() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"PowerShot", "500", "42", "wifi", "wifi", "4000", "j", "2", "se", "ONeil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -58,13 +59,13 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testCatenateNumbers() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
.put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"Power", "Shot", "50042", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -73,14 +74,14 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testCatenateAll() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
.put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
.put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"PowerShot", "50042", "wifi", "wifi4000", "j2se", "ONeil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -89,12 +90,12 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testSplitOnCaseChange() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot";
String[] expected = new String[]{"PowerShot"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -103,12 +104,12 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testPreserveOriginal() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.preserve_original", "true")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"PowerShot", "Power", "Shot", "500-42", "500", "42", "wi-fi", "wi", "fi", "wi-fi-4000", "wi", "fi", "4000", "j2se", "j", "2", "se", "O'Neil's", "O", "Neil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -117,12 +118,12 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
}
public void testStemEnglishPossessive() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil", "s"};
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -132,13 +133,13 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase
/** Correct offset order when doing both parts and concatenation: PowerShot is a synonym of Power */
public void testPartsAndCatenate() throws IOException {
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(Settings.builder()
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true")
.build());
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
String source = "PowerShot";
String[] expected = new String[]{"Power", "PowerShot", "Shot" };
Tokenizer tokenizer = new WhitespaceTokenizer();
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
index 3c192052db..ed6866e6a8 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
@@ -24,9 +24,10 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.junit.Assert;
@@ -43,7 +44,7 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.build();
try {
- AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
Assert.fail("[common_words] or [common_words_path] is set");
} catch (IllegalArgumentException e) {
} catch (IOException e) {
@@ -58,9 +59,9 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
{
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_default");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -75,9 +76,9 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
{
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_default");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -94,8 +95,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_1");
String source = "the quick brown is a fox or noT";
String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "fox_or", "or", "or_noT", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -108,8 +109,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_2");
String source = "the quick brown is a fox or why noT";
String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "or", "why", "why_noT", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -121,8 +122,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.putArray("index.analysis.filter.common_grams_3.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_3");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -138,15 +139,17 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createHome())
.build();
{
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
+ .indexAnalyzers;
+ Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
}
{
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
+ .indexAnalyzers;
+ Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer_file").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
@@ -161,8 +164,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.common_grams_1.ignore_case", true)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_1");
String source = "the quick brown is a fox or noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox_or", "or_noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -176,8 +179,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put("index.analysis.filter.common_grams_2.ignore_case", false)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_2");
String source = "the quick brown is a fox or why noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -190,8 +193,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.putArray("index.analysis.filter.common_grams_3.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_3");
String source = "the quick brown is a fox or why noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -204,8 +207,8 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.putArray("index.analysis.filter.common_grams_4.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_4");
+ ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("common_grams_4");
String source = "the quick brown is a fox Or noT";
String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "Or", "noT" };
Tokenizer tokenizer = new WhitespaceTokenizer();
@@ -221,15 +224,17 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createHome())
.build();
{
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
+ .indexAnalyzers;
+ Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
}
{
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ IndexAnalyzers indexAnalyzers = AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
+ .indexAnalyzers;
+ Analyzer analyzer = indexAnalyzers.get("commongramsAnalyzer_file").analyzer();
String source = "the quick brown is a fox or not";
String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java b/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
index c7cd3cd625..1c9a479813 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
@@ -18,9 +18,9 @@
*/
package org.elasticsearch.index.analysis.filter1;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.core.StopFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -36,4 +36,4 @@ public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory {
public TokenStream create(TokenStream tokenStream) {
return new StopFilter(tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
}
-}
\ No newline at end of file
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java
index 9e4d5b27ad..c6dfdc1a41 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java
@@ -19,18 +19,18 @@
package org.elasticsearch.index.analysis.synonyms;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.all.AllTokenStream;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.hamcrest.MatcherAssert;
@@ -45,8 +45,8 @@ import static org.hamcrest.Matchers.equalTo;
/**
*/
public class SynonymsAnalysisTests extends ESTestCase {
- protected final ESLogger logger = Loggers.getLogger(getClass());
- private AnalysisService analysisService;
+ protected final Logger logger = Loggers.getLogger(getClass());
+ private IndexAnalyzers indexAnalyzers;
public void testSynonymsAnalysis() throws IOException {
InputStream synonyms = getClass().getResourceAsStream("synonyms.txt");
@@ -64,7 +64,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
- analysisService = createAnalysisService(idxSettings, settings);
+ indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!");
match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!");
@@ -74,8 +74,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
}
private void match(String analyzerName, String source, String target) throws IOException {
-
- Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+ Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer();
TokenStream stream = AllTokenStream.allTokenStream("_all", source, 1.0f, analyzer);
stream.reset();
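The same migration shows up twice in this file: the logging field moves from the ESLogger facade to the log4j2 Logger returned by Loggers.getLogger, and analyzer resolution moves from analysisService.analyzer(name).analyzer() to the IndexAnalyzers registry. A small sketch of the resolution step; assertAnalyzesTo is a hypothetical helper name, and assertTokenStreamContents is assumed to be in scope as it is for the ESTokenStreamTestCase-based tests earlier in this diff:

    // Hypothetical helper: resolve a named analyzer via IndexAnalyzers and
    // check its output. NamedAnalyzer.analyzer() unwraps the Lucene Analyzer.
    private void assertAnalyzesTo(String analyzerName, String source, String[] expected) throws IOException {
        Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer();
        assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
    }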
diff --git a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
index 80f1cbe46d..ea89acbd8f 100644
--- a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
+++ b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
@@ -25,7 +25,7 @@ import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
import org.apache.lucene.codecs.lucene53.Lucene53Codec;
import org.apache.lucene.codecs.lucene54.Lucene54Codec;
-import org.apache.lucene.codecs.lucene60.Lucene60Codec;
+import org.apache.lucene.codecs.lucene62.Lucene62Codec;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
@@ -37,7 +37,7 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
@@ -55,7 +55,7 @@ public class CodecTests extends ESTestCase {
public void testResolveDefaultCodecs() throws Exception {
CodecService codecService = createCodecService();
assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class));
- assertThat(codecService.codec("default"), instanceOf(Lucene60Codec.class));
+ assertThat(codecService.codec("default"), instanceOf(Lucene62Codec.class));
assertThat(codecService.codec("Lucene54"), instanceOf(Lucene54Codec.class));
assertThat(codecService.codec("Lucene53"), instanceOf(Lucene53Codec.class));
assertThat(codecService.codec("Lucene50"), instanceOf(Lucene50Codec.class));
@@ -95,9 +95,9 @@ public class CodecTests extends ESTestCase {
.build();
IndexSettings settings = IndexSettingsModule.newIndexSettings("_na", nodeSettings);
SimilarityService similarityService = new SimilarityService(settings, Collections.emptyMap());
- AnalysisService analysisService = createAnalysisService(settings, nodeSettings);
+ IndexAnalyzers indexAnalyzers = createTestAnalysis(settings, nodeSettings).indexAnalyzers;
MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), Collections.emptyMap());
- MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry, () -> null);
+ MapperService service = new MapperService(settings, indexAnalyzers, similarityService, mapperRegistry, () -> null);
return new CodecService(service, ESLoggerFactory.getLogger("test"));
}
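The codec expectations track the Lucene upgrade carried by this merge: the default codec is now backed by Lucene62Codec, while the older named codecs (Lucene54, Lucene53, Lucene50) stay addressable for reading existing indices. A hedged sketch of the assertion pattern; assertCodec is a hypothetical helper that does not appear in the diff:

    // Hypothetical helper: each registered codec name should resolve to the
    // expected implementation class.
    private static void assertCodec(CodecService service, String name, Class<?> expected) {
        assertThat(service.codec(name), instanceOf(expected));
    }
    // e.g. assertCodec(codecService, "default", Lucene62Codec.class);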
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 6ae432dfbf..2ea45f7a40 100644
--- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -20,11 +20,13 @@
package org.elasticsearch.index.engine;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.filter.RegexFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
@@ -42,6 +44,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHitCountCollector;
@@ -52,23 +55,26 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.Engine.Searcher;
import org.elasticsearch.index.mapper.ContentPath;
@@ -169,10 +175,10 @@ public class InternalEngineTests extends ESTestCase {
codecName = "default";
}
defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder()
- .put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
+ .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us
.put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
- .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD,
+ .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(),
between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY)))
.build()); // TODO randomize more settings
threadPool = new TestThreadPool(getClass().getName());
@@ -204,7 +210,8 @@ public class InternalEngineTests extends ESTestCase {
return new EngineConfig(openMode, config.getShardId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(),
config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(),
new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(),
- config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners());
+ config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(),
+ config.getMaxUnsafeAutoIdTimestamp());
}
@Override
@@ -274,7 +281,7 @@ public class InternalEngineTests extends ESTestCase {
}
protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) throws IOException {
- EngineConfig config = config(indexSettings, store, translogPath, mergePolicy);
+ EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null);
InternalEngine internalEngine = new InternalEngine(config);
if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
internalEngine.recoverFromTranslog();
@@ -282,7 +289,8 @@ public class InternalEngineTests extends ESTestCase {
return internalEngine;
}
- public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) {
+ public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
+ long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener) {
IndexWriterConfig iwc = newIndexWriterConfig();
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
final EngineConfig.OpenMode openMode;
@@ -304,7 +312,8 @@ public class InternalEngineTests extends ESTestCase {
EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(),
mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener,
new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(),
- IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), null);
+ IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListener,
+ maxUnsafeAutoIdTimestamp);
return config;
}
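The test-local config(...) factory above grows two trailing parameters that are threaded into EngineConfig: the max unsafe auto-generated-id timestamp and an optional ReferenceManager.RefreshListener. Call sites that do not care pass the unset sentinel and null, as in this sketch of the common case taken from the surrounding hunks:

    // Most tests opt out of both new features: no auto-id timestamp carried
    // over from a previous shard copy, and no external refresh listener.
    EngineConfig config = config(indexSettings, store, translogPath, mergePolicy,
            IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null /* refresh listener */);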
@@ -581,6 +590,7 @@ public class InternalEngineTests extends ESTestCase {
engine.close();
engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
+ assertTrue(engine.isRecovering());
engine.recoverFromTranslog();
Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test"));
assertThat(counter.get(), equalTo(2));
@@ -589,13 +599,16 @@ public class InternalEngineTests extends ESTestCase {
}
public void testFlushIsDisabledDuringTranslogRecovery() throws IOException {
+ assertFalse(engine.isRecovering());
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
engine.index(new Engine.Index(newUid("1"), doc));
engine.close();
engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
- expectThrows(FlushNotAllowedEngineException.class, () -> engine.flush(true, true));
+ expectThrows(IllegalStateException.class, () -> engine.flush(true, true));
+ assertTrue(engine.isRecovering());
engine.recoverFromTranslog();
+ assertFalse(engine.isRecovering());
doc = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
engine.index(new Engine.Index(newUid("2"), doc));
engine.flush();
@@ -610,7 +623,7 @@ public class InternalEngineTests extends ESTestCase {
for (int i = 0; i < ops; i++) {
final ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
if (randomBoolean()) {
- final Engine.Index operation = new Engine.Index(newUid("test#1"), doc, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime());
+ final Engine.Index operation = new Engine.Index(newUid("test#1"), doc, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
operations.add(operation);
initialEngine.index(operation);
} else {
@@ -726,17 +739,17 @@ public class InternalEngineTests extends ESTestCase {
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
searchResult.close();
+ // but, it is not there when fetched non-realtime
+ Engine.GetResult getResult = engine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
// but, we can still get it (in realtime)
- Engine.GetResult getResult = engine.get(new Engine.Get(true, newUid("1")));
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
assertThat(getResult.exists(), equalTo(true));
- assertThat(getResult.source().source, equalTo(B_1));
- assertThat(getResult.docIdAndVersion(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
- // but, not there non realtime
- getResult = engine.get(new Engine.Get(false, newUid("1")));
- assertThat(getResult.exists(), equalTo(false));
- getResult.release();
// refresh and it should be there
engine.refresh("test");
@@ -769,8 +782,7 @@ public class InternalEngineTests extends ESTestCase {
// but, we can still get it (in realtime)
getResult = engine.get(new Engine.Get(true, newUid("1")));
assertThat(getResult.exists(), equalTo(true));
- assertThat(getResult.source().source, equalTo(B_2));
- assertThat(getResult.docIdAndVersion(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
// refresh and it should be updated
@@ -835,7 +847,6 @@ public class InternalEngineTests extends ESTestCase {
// and, verify get (in real time)
getResult = engine.get(new Engine.Get(true, newUid("1")));
assertThat(getResult.exists(), equalTo(true));
- assertThat(getResult.source(), nullValue());
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
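The reordered assertions above encode a behavioral change: a realtime get is no longer served from the uncommitted translog (so the GetResult.source() assertions are gone) and instead resolves against a live reader, which is why docIdAndVersion() is now expected to be populated. A compact sketch of the new expectation, using only calls that appear in this hunk:

    Engine.GetResult getResult = engine.get(new Engine.Get(true /* realtime */, newUid("1")));
    assertThat(getResult.exists(), equalTo(true));
    assertThat(getResult.docIdAndVersion(), notNullValue()); // reader-backed, not translog-backed
    getResult.release();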
@@ -903,7 +914,7 @@ public class InternalEngineTests extends ESTestCase {
public void testSyncedFlush() throws IOException {
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
- new LogByteSizeMergePolicy()))) {
+ new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
engine.index(new Engine.Index(newUid("1"), doc));
@@ -930,7 +941,7 @@ public class InternalEngineTests extends ESTestCase {
for (int i = 0; i < iters; i++) {
try (Store store = createStore();
InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
- new LogDocMergePolicy()))) {
+ new LogDocMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
Engine.Index doc1 = new Engine.Index(newUid("1"), doc);
@@ -943,7 +954,7 @@ public class InternalEngineTests extends ESTestCase {
engine.flush();
final boolean forceMergeFlushes = randomBoolean();
if (forceMergeFlushes) {
- engine.index(new Engine.Index(newUid("3"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos()));
+ engine.index(new Engine.Index(newUid("3"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false));
} else {
engine.index(new Engine.Index(newUid("3"), doc));
}
@@ -1019,12 +1030,6 @@ public class InternalEngineTests extends ESTestCase {
engine.index(new Engine.Index(newUid("2"), doc));
EngineConfig config = engine.config();
engine.close();
- final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
- if (directory != null) {
- // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
- // this so we have to disable the check explicitly
- directory.setPreventDoubleWrite(false);
- }
engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
engine.recoverFromTranslog();
assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
@@ -1036,7 +1041,7 @@ public class InternalEngineTests extends ESTestCase {
engine.index(create);
assertThat(create.version(), equalTo(1L));
- create = new Engine.Index(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ create = new Engine.Index(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
replicaEngine.index(create);
assertThat(create.version(), equalTo(1L));
}
@@ -1047,18 +1052,18 @@ public class InternalEngineTests extends ESTestCase {
engine.index(index);
assertThat(index.version(), equalTo(1L));
- index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
replicaEngine.index(index);
assertThat(index.version(), equalTo(1L));
}
public void testExternalVersioningNewIndex() {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
- Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0);
+ Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
engine.index(index);
assertThat(index.version(), equalTo(12L));
- index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
replicaEngine.index(index);
assertThat(index.version(), equalTo(12L));
}
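From this point on, nearly every Engine.Index construction in the file gains the same two trailing arguments: an auto-generated-id timestamp (-1, i.e. unset, when the caller supplied the document id) and an isRetry flag marking a redelivered request. A sketch of the widened constructor as used in these hunks:

    // (uid, doc, version, versionType, origin, startTime,
    //  autoGeneratedIdTimestamp, isRetry)
    Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL,
            PRIMARY, 0, -1 /* no auto-id timestamp */, false /* not a retry */);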
@@ -1073,7 +1078,7 @@ public class InternalEngineTests extends ESTestCase {
engine.index(index);
assertThat(index.version(), equalTo(2L));
- index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1082,7 +1087,7 @@ public class InternalEngineTests extends ESTestCase {
}
// future versions should not work as well
- index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1093,15 +1098,15 @@ public class InternalEngineTests extends ESTestCase {
public void testExternalVersioningIndexConflict() {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
- Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0);
+ Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
engine.index(index);
assertThat(index.version(), equalTo(12L));
- index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
engine.index(index);
assertThat(index.version(), equalTo(14L));
- index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1122,7 +1127,7 @@ public class InternalEngineTests extends ESTestCase {
engine.flush();
- index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1131,7 +1136,7 @@ public class InternalEngineTests extends ESTestCase {
}
// future versions should not work as well
- index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1142,17 +1147,17 @@ public class InternalEngineTests extends ESTestCase {
public void testExternalVersioningIndexConflictWithFlush() {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
- Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0);
+ Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
engine.index(index);
assertThat(index.version(), equalTo(12L));
- index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
engine.index(index);
assertThat(index.version(), equalTo(14L));
engine.flush();
- index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1164,7 +1169,7 @@ public class InternalEngineTests extends ESTestCase {
public void testForceMerge() throws IOException {
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
- new LogByteSizeMergePolicy()))) { // use log MP here we test some behavior in ESMP
+ new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) { // use log MP here since we test some behavior in ESMP
int numDocs = randomIntBetween(10, 100);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), B_1, null);
@@ -1289,7 +1294,7 @@ public class InternalEngineTests extends ESTestCase {
assertThat(delete.version(), equalTo(3L));
// now check if we can index to a delete doc with version
- index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1298,7 +1303,7 @@ public class InternalEngineTests extends ESTestCase {
}
// we shouldn't be able to create as well
- Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0);
+ Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(create);
} catch (VersionConflictEngineException e) {
@@ -1345,7 +1350,7 @@ public class InternalEngineTests extends ESTestCase {
engine.flush();
// now check if we can index to a delete doc with version
- index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0);
+ index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(index);
fail();
@@ -1354,7 +1359,7 @@ public class InternalEngineTests extends ESTestCase {
}
// we shouldn't be able to create as well
- Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0);
+ Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(create);
} catch (VersionConflictEngineException e) {
@@ -1364,11 +1369,11 @@ public class InternalEngineTests extends ESTestCase {
public void testVersioningCreateExistsException() {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
- Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0);
+ Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
engine.index(create);
assertThat(create.version(), equalTo(1L));
- create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0);
+ create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(create);
fail();
@@ -1379,13 +1384,13 @@ public class InternalEngineTests extends ESTestCase {
public void testVersioningCreateExistsExceptionWithFlush() {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
- Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0);
+ Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
engine.index(create);
assertThat(create.version(), equalTo(1L));
engine.flush();
- create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0);
+ create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
try {
engine.index(create);
fail();
@@ -1405,12 +1410,12 @@ public class InternalEngineTests extends ESTestCase {
assertThat(index.version(), equalTo(2L));
// apply the second index to the replica, should work fine
- index = new Engine.Index(newUid("1"), doc, index.version(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ index = new Engine.Index(newUid("1"), doc, index.version(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
replicaEngine.index(index);
assertThat(index.version(), equalTo(2L));
// now, the old one should not work
- index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
try {
replicaEngine.index(index);
fail();
@@ -1421,7 +1426,7 @@ public class InternalEngineTests extends ESTestCase {
// second version on replica should fail as well
try {
index = new Engine.Index(newUid("1"), doc, 2L
- , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
replicaEngine.index(index);
assertThat(index.version(), equalTo(2L));
} catch (VersionConflictEngineException e) {
@@ -1437,7 +1442,7 @@ public class InternalEngineTests extends ESTestCase {
// apply the first index to the replica, should work fine
index = new Engine.Index(newUid("1"), doc, 1L
- , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
replicaEngine.index(index);
assertThat(index.version(), equalTo(1L));
@@ -1469,7 +1474,7 @@ public class InternalEngineTests extends ESTestCase {
// now do the second index on the replica, it should fail
try {
- index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
replicaEngine.index(index);
fail("excepted VersionConflictEngineException to be thrown");
} catch (VersionConflictEngineException e) {
@@ -1480,69 +1485,71 @@ public class InternalEngineTests extends ESTestCase {
public void testBasicCreatedFlag() {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
Engine.Index index = new Engine.Index(newUid("1"), doc);
- assertTrue(engine.index(index));
+ engine.index(index);
+ assertTrue(index.isCreated());
index = new Engine.Index(newUid("1"), doc);
- assertFalse(engine.index(index));
+ engine.index(index);
+ assertFalse(index.isCreated());
engine.delete(new Engine.Delete(null, "1", newUid("1")));
index = new Engine.Index(newUid("1"), doc);
- assertTrue(engine.index(index));
+ engine.index(index);
+ assertTrue(index.isCreated());
}
public void testCreatedFlagAfterFlush() {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
Engine.Index index = new Engine.Index(newUid("1"), doc);
- assertTrue(engine.index(index));
+ engine.index(index);
+ assertTrue(index.isCreated());
engine.delete(new Engine.Delete(null, "1", newUid("1")));
engine.flush();
index = new Engine.Index(newUid("1"), doc);
- assertTrue(engine.index(index));
+ engine.index(index);
+ assertTrue(index.isCreated());
}
- private static class MockAppender extends AppenderSkeleton {
+ private static class MockAppender extends AbstractAppender {
public boolean sawIndexWriterMessage;
public boolean sawIndexWriterIFDMessage;
+ public MockAppender(final String name) throws IllegalAccessException {
+ super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
+ }
+
@Override
- protected void append(LoggingEvent event) {
- if (event.getLevel() == Level.TRACE && event.getMessage().toString().contains("[index][1] ")) {
- if (event.getLoggerName().endsWith("lucene.iw") &&
- event.getMessage().toString().contains("IW: apply all deletes during flush")) {
+ public void append(LogEvent event) {
+ final String formattedMessage = event.getMessage().getFormattedMessage();
+ if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][1] ")) {
+ if (event.getLoggerName().endsWith(".IW") &&
+ formattedMessage.contains("IW: apply all deletes during flush")) {
sawIndexWriterMessage = true;
}
- if (event.getLoggerName().endsWith("lucene.iw.ifd")) {
+ if (event.getLoggerName().endsWith(".IFD")) {
sawIndexWriterIFDMessage = true;
}
}
}
-
- @Override
- public boolean requiresLayout() {
- return false;
- }
-
- @Override
- public void close() {
- }
}
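The appender migration above is the log4j 1.x to log4j2 move in miniature: AppenderSkeleton becomes AbstractAppender, which wants a name and a Filter up front; append() receives immutable LogEvents; and the requiresLayout()/close() overrides have no log4j2 equivalent. A sketch of the same shape with a hypothetical class name, reusing the catch-all RegexFilter from the diff (it matches everything, including multi-line messages, and its factory method declares IllegalAccessException):

    final class CapturingAppender extends AbstractAppender {
        CapturingAppender(String name) throws IllegalAccessException {
            super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
        }
        @Override
        public void append(LogEvent event) {
            String message = event.getMessage().getFormattedMessage(); // rendered text
            // inspect message, event.getLevel(), event.getLoggerName() here
        }
    }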
// #5891: make sure IndexWriter's infoStream output is
// sent to lucene.iw with log level TRACE:
- public void testIndexWriterInfoStream() {
+ public void testIndexWriterInfoStream() throws IllegalAccessException {
assumeFalse("who tests the tester?", VERBOSE);
- MockAppender mockAppender = new MockAppender();
+ MockAppender mockAppender = new MockAppender("testIndexWriterInfoStream");
- Logger rootLogger = Logger.getRootLogger();
+ Logger rootLogger = LogManager.getRootLogger();
Level savedLevel = rootLogger.getLevel();
- rootLogger.addAppender(mockAppender);
- rootLogger.setLevel(Level.DEBUG);
+ Loggers.addAppender(rootLogger, mockAppender);
+ Loggers.setLevel(rootLogger, Level.DEBUG);
+ rootLogger = LogManager.getRootLogger();
try {
// First, with DEBUG, which should NOT log IndexWriter output:
@@ -1552,32 +1559,26 @@ public class InternalEngineTests extends ESTestCase {
assertFalse(mockAppender.sawIndexWriterMessage);
// Again, with TRACE, which should log IndexWriter output:
- rootLogger.setLevel(Level.TRACE);
+ Loggers.setLevel(rootLogger, Level.TRACE);
engine.index(new Engine.Index(newUid("2"), doc));
engine.flush();
assertTrue(mockAppender.sawIndexWriterMessage);
} finally {
- rootLogger.removeAppender(mockAppender);
- rootLogger.setLevel(savedLevel);
+ Loggers.removeAppender(rootLogger, mockAppender);
+ Loggers.setLevel(rootLogger, savedLevel);
}
}
// #8603: make sure we can separately log IFD's messages
- public void testIndexWriterIFDInfoStream() {
+ public void testIndexWriterIFDInfoStream() throws IllegalAccessException {
assumeFalse("who tests the tester?", VERBOSE);
- MockAppender mockAppender = new MockAppender();
+ MockAppender mockAppender = new MockAppender("testIndexWriterIFDInfoStream");
- // Works when running this test inside Intellij:
- Logger iwIFDLogger = LogManager.exists("org.elasticsearch.index.engine.lucene.iw.ifd");
- if (iwIFDLogger == null) {
- // Works when running this test from command line:
- iwIFDLogger = LogManager.exists("index.engine.lucene.iw.ifd");
- assertNotNull(iwIFDLogger);
- }
+ final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD");
- iwIFDLogger.addAppender(mockAppender);
- iwIFDLogger.setLevel(Level.DEBUG);
+ Loggers.addAppender(iwIFDLogger, mockAppender);
+ Loggers.setLevel(iwIFDLogger, Level.DEBUG);
try {
// First, with DEBUG, which should NOT log IndexWriter output:
@@ -1588,21 +1589,21 @@ public class InternalEngineTests extends ESTestCase {
assertFalse(mockAppender.sawIndexWriterIFDMessage);
// Again, with TRACE, which should only log IndexWriter IFD output:
- iwIFDLogger.setLevel(Level.TRACE);
+ Loggers.setLevel(iwIFDLogger, Level.TRACE);
engine.index(new Engine.Index(newUid("2"), doc));
engine.flush();
assertFalse(mockAppender.sawIndexWriterMessage);
assertTrue(mockAppender.sawIndexWriterIFDMessage);
} finally {
- iwIFDLogger.removeAppender(mockAppender);
- iwIFDLogger.setLevel(null);
+ Loggers.removeAppender(iwIFDLogger, mockAppender);
+ Loggers.setLevel(iwIFDLogger, (Level) null);
}
}
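Both logging tests now manage appenders and levels through the org.elasticsearch.common.logging.Loggers helpers rather than mutating the logger directly, and passing a null Level at the end clears the explicit level so the logger falls back to its inherited configuration. The common attach/exercise/detach shape, using only the calls shown in these hunks:

    Loggers.addAppender(logger, mockAppender);
    Loggers.setLevel(logger, Level.TRACE);
    try {
        // ... code under test that is expected to log ...
    } finally {
        Loggers.removeAppender(logger, mockAppender);
        Loggers.setLevel(logger, (Level) null); // restore the inherited level
    }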
public void testEnableGcDeletes() throws Exception {
try (Store store = createStore();
- Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy()))) {
+ Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
engine.config().setEnableGcDeletes(false);
// Add document
@@ -1610,7 +1611,7 @@ public class InternalEngineTests extends ESTestCase {
document.add(new TextField("value", "test1", Field.Store.YES));
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_2, null);
- engine.index(new Engine.Index(newUid("1"), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ engine.index(new Engine.Index(newUid("1"), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
// Delete document we just added:
engine.delete(new Engine.Delete("test", "1", newUid("1"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
@@ -1635,7 +1636,7 @@ public class InternalEngineTests extends ESTestCase {
// Try to index uid=1 with a too-old version, should fail:
try {
- engine.index(new Engine.Index(newUid("1"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ engine.index(new Engine.Index(newUid("1"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
fail("did not hit expected exception");
} catch (VersionConflictEngineException vcee) {
// expected
@@ -1647,7 +1648,7 @@ public class InternalEngineTests extends ESTestCase {
// Try to index uid=2 with a too-old version, should fail:
try {
- engine.index(new Engine.Index(newUid("2"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ engine.index(new Engine.Index(newUid("2"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
fail("did not hit expected exception");
} catch (VersionConflictEngineException vcee) {
// expected
@@ -1738,7 +1739,7 @@ public class InternalEngineTests extends ESTestCase {
// expected
}
// now it should be OK.
- EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy()), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG);
+ EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG);
engine = new InternalEngine(config);
}
@@ -1746,7 +1747,7 @@ public class InternalEngineTests extends ESTestCase {
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
}
@@ -1760,7 +1761,6 @@ public class InternalEngineTests extends ESTestCase {
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
- directory.setPreventDoubleWrite(false);
boolean started = false;
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {
@@ -1797,7 +1797,7 @@ public class InternalEngineTests extends ESTestCase {
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
}
@@ -1806,12 +1806,6 @@ public class InternalEngineTests extends ESTestCase {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
- final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
- if (directory != null) {
- // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
- // this so we have to disable the check explicitly
- directory.setPreventDoubleWrite(false);
- }
engine.close();
engine = new InternalEngine(engine.config());
@@ -1823,7 +1817,8 @@ public class InternalEngineTests extends ESTestCase {
}
private Mapping dynamicUpdate() {
- BuilderContext context = new BuilderContext(Settings.EMPTY, new ContentPath());
+ BuilderContext context = new BuilderContext(
+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), new ContentPath());
final RootObjectMapper root = new RootObjectMapper.Builder("some_type").build(context);
return new Mapping(Version.CURRENT, root, new MetadataFieldMapper[0], emptyMap());
}
@@ -1892,7 +1887,7 @@ public class InternalEngineTests extends ESTestCase {
final int numExtraDocs = randomIntBetween(1, 10);
for (int i = 0; i < numExtraDocs; i++) {
ParsedDocument doc = testParsedDocument("extra" + Integer.toString(i), "extra" + Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
}
@@ -1921,7 +1916,7 @@ public class InternalEngineTests extends ESTestCase {
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
}
@@ -1930,12 +1925,6 @@ public class InternalEngineTests extends ESTestCase {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
- final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
- if (directory != null) {
- // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
- // this so we have to disable the check explicitly
- directory.setPreventDoubleWrite(false);
- }
TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
parser.mappingUpdate = dynamicUpdate();
@@ -1970,7 +1959,7 @@ public class InternalEngineTests extends ESTestCase {
int randomId = randomIntBetween(numDocs + 1, numDocs + 10);
String uuidValue = "test#" + Integer.toString(randomId);
ParsedDocument doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
if (flush) {
@@ -1978,7 +1967,7 @@ public class InternalEngineTests extends ESTestCase {
}
doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index idxRequest = new Engine.Index(newUid(uuidValue), doc, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index idxRequest = new Engine.Index(newUid(uuidValue), doc, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(idxRequest);
engine.refresh("test");
assertThat(idxRequest.version(), equalTo(2L));
@@ -2015,16 +2004,18 @@ public class InternalEngineTests extends ESTestCase {
public final AtomicInteger recoveredOps = new AtomicInteger(0);
- public TranslogHandler(String indexName, ESLogger logger) {
+ public TranslogHandler(String indexName, Logger logger) {
super(new ShardId("test", "_na_", 0), null, logger);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test");
Index index = new Index(indexName, "_na_");
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
- AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
+ NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", new StandardAnalyzer());
+ IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap());
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
- MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null);
+ MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, () -> null);
DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService);
this.docMapper = b.build(mapperService);
}
@@ -2044,7 +2035,7 @@ public class InternalEngineTests extends ESTestCase {
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
}
@@ -2053,12 +2044,6 @@ public class InternalEngineTests extends ESTestCase {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
- final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
- if (directory != null) {
- // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
- // this so we have to disable the check explicitly
- directory.setPreventDoubleWrite(false);
- }
Translog.TranslogGeneration generation = engine.getTranslog().getGeneration();
engine.close();
@@ -2076,7 +2061,7 @@ public class InternalEngineTests extends ESTestCase {
config.getIndexSettings(), null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(),
config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(),
IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
- TimeValue.timeValueMinutes(5), config.getRefreshListeners());
+ TimeValue.timeValueMinutes(5), config.getRefreshListeners(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
try {
InternalEngine internalEngine = new InternalEngine(brokenConfig);
@@ -2131,14 +2116,15 @@ public class InternalEngineTests extends ESTestCase {
public void testCurrentTranslogIDisCommitted() throws IOException {
try (Store store = createStore()) {
- EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy());
+ EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null);
// create
{
ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG))){
+ assertFalse(engine.isRecovering());
engine.index(firstIndexRequest);
expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog());
@@ -2151,6 +2137,7 @@ public class InternalEngineTests extends ESTestCase {
{
for (int i = 0; i < 2; i++) {
try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) {
+ assertTrue(engine.isRecovering());
Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData();
if (i == 0) {
assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY));
@@ -2196,7 +2183,7 @@ public class InternalEngineTests extends ESTestCase {
final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
}
@@ -2206,7 +2193,7 @@ public class InternalEngineTests extends ESTestCase {
engine.forceMerge(randomBoolean(), 1, false, false, false);
ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
engine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(2L));
engine.flush(); // flush - buffered deletes are not counted
@@ -2218,4 +2205,315 @@ public class InternalEngineTests extends ESTestCase {
assertEquals(0, docStats.getDeleted());
assertEquals(numDocs, docStats.getCount());
}
+
+ public void testDoubleDelivery() throws IOException {
+ final ParsedDocument doc = testParsedDocument("1", "1", "test", null, 100, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ Engine.Index operation = randomAppendOnly(1, doc, false);
+ Engine.Index retry = randomAppendOnly(1, doc, true);
+ if (randomBoolean()) {
+ engine.index(operation);
+ assertFalse(engine.indexWriterHasDeletions());
+ assertEquals(0, engine.getNumVersionLookups());
+ assertNotNull(operation.getTranslogLocation());
+ engine.index(retry);
+ assertTrue(engine.indexWriterHasDeletions());
+ assertEquals(0, engine.getNumVersionLookups());
+ assertNotNull(retry.getTranslogLocation());
+ assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0);
+ } else {
+ engine.index(retry);
+ assertTrue(engine.indexWriterHasDeletions());
+ assertEquals(0, engine.getNumVersionLookups());
+ assertNotNull(retry.getTranslogLocation());
+ engine.index(operation);
+ assertTrue(engine.indexWriterHasDeletions());
+ assertEquals(0, engine.getNumVersionLookups());
+ assertNotNull(retry.getTranslogLocation());
+ assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0);
+ }
+
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(1, topDocs.totalHits);
+ }
+ operation = randomAppendOnly(1, doc, false);
+ retry = randomAppendOnly(1, doc, true);
+ if (randomBoolean()) {
+ engine.index(operation);
+ assertNotNull(operation.getTranslogLocation());
+ engine.index(retry);
+ assertNotNull(retry.getTranslogLocation());
+ assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0);
+ } else {
+ engine.index(retry);
+ assertNotNull(retry.getTranslogLocation());
+ engine.index(operation);
+ assertNotNull(retry.getTranslogLocation());
+ assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0);
+ }
+
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(1, topDocs.totalHits);
+ }
+ }
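testDoubleDelivery pins down the append-only optimization this merge relies on: the first delivery of an auto-id document is a pure addDocument with no version-map lookup, while a redelivered copy (isRetry = true) is routed through an update so the duplicate is tombstoned instead of double-indexed. The invariant, restated with the counters used above:

    engine.index(randomAppendOnly(1, doc, false)); // plain append: no deletes yet
    engine.index(randomAppendOnly(1, doc, true));  // retry: forces delete-then-add
    assertTrue(engine.indexWriterHasDeletions());  // the duplicate was tombstoned
    assertEquals(0, engine.getNumVersionLookups()); // dedup without version lookups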
+
+
+ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException {
+
+ final ParsedDocument doc = testParsedDocument("1", "1", "test", null, 100, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ boolean isRetry = false;
+ long autoGeneratedIdTimestamp = 0;
+
+ Engine.Index index = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1L));
+
+ index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(1L));
+
+ isRetry = true;
+ index = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1L));
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(1, topDocs.totalHits);
+ }
+
+ index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ replicaEngine.index(index);
+ replicaEngine.refresh("test");
+ try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(1, topDocs.totalHits);
+ }
+ }
+
+ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() throws IOException {
+
+ final ParsedDocument doc = testParsedDocument("1", "1", "test", null, 100, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ boolean isRetry = true;
+ long autoGeneratedIdTimestamp = 0;
+
+
+ Engine.Index firstIndexRequest = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ engine.index(firstIndexRequest);
+ assertThat(firstIndexRequest.version(), equalTo(1L));
+
+ Engine.Index firstIndexRequestReplica = new Engine.Index(newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ replicaEngine.index(firstIndexRequestReplica);
+ assertThat(firstIndexRequestReplica.version(), equalTo(1L));
+
+ isRetry = false;
+ Engine.Index secondIndexRequest = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ engine.index(secondIndexRequest);
+ assertTrue(secondIndexRequest.isCreated());
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(1, topDocs.totalHits);
+ }
+
+ Engine.Index secondIndexRequestReplica = new Engine.Index(newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ replicaEngine.index(secondIndexRequestReplica);
+ replicaEngine.refresh("test");
+ try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(1, topDocs.totalHits);
+ }
+ }
+
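+ // helper: builds an append-only (auto-generated id) index op, randomly using either a
+ // primary origin with internal versioning or a replica origin with an external version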
+ public Engine.Index randomAppendOnly(int docId, ParsedDocument doc, boolean retry) {
+ if (randomBoolean()) {
+ return new Engine.Index(newUid(Integer.toString(docId)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), docId, retry);
+ }
+ return new Engine.Index(newUid(Integer.toString(docId)), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.REPLICA, System.nanoTime(), docId, retry);
+ }
+
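+ // indexes an original and a retried copy of every append-only op from several threads;
+ // regardless of the interleaving, each document must end up in the index exactly once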
+ public void testRetryConcurrently() throws InterruptedException, IOException {
+ Thread[] thread = new Thread[randomIntBetween(3, 5)];
+ int numDocs = randomIntBetween(1000, 10000);
+ List<Engine.Index> docs = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ final ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, i, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ Engine.Index originalIndex = randomAppendOnly(i, doc, false);
+ Engine.Index retryIndex = randomAppendOnly(i, doc, true);
+ docs.add(originalIndex);
+ docs.add(retryIndex);
+ }
+ Collections.shuffle(docs, random());
+ CountDownLatch startGun = new CountDownLatch(thread.length);
+ AtomicInteger offset = new AtomicInteger(-1);
+ for (int i = 0; i < thread.length; i++) {
+ thread[i] = new Thread() {
+ @Override
+ public void run() {
+ startGun.countDown();
+ try {
+ startGun.await();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ int docOffset;
+ while ((docOffset = offset.incrementAndGet()) < docs.size()) {
+ engine.index(docs.get(docOffset));
+ }
+ }
+ };
+ thread[i].start();
+ }
+ for (int i = 0; i < thread.length; i++) {
+ thread[i].join();
+ }
+ assertEquals(0, engine.getNumVersionLookups());
+ assertEquals(0, engine.getNumIndexVersionsLookups());
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(numDocs, topDocs.totalHits);
+ }
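+ // retries turn some appends into updates, so the index writer must have recorded deletes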
+ assertTrue(engine.indexWriterHasDeletions());
+ }
+
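+ // the engine must report the max unsafe auto-generated-id timestamp it was configured
+ // with through segment stats, both for the unset default and for an explicit value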
+ public void testEngineMaxTimestampIsInitialized() throws IOException {
+ try (Store store = createStore();
+ Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE,
+ IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
+ assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
+ }
+
+ long maxTimestamp = randomLong() & Long.MAX_VALUE; // mask the sign bit: Math.abs(Long.MIN_VALUE) would stay negative
+ try (Store store = createStore();
+ Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE,
+ maxTimestamp, null))) {
+ assertEquals(maxTimestamp, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
+ }
+ }
+
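+ // concurrent append-only indexing without retries: the optimized path must never do a
+ // version lookup and must not produce any deletes in the index writer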
+ public void testAppendConcurrently() throws InterruptedException, IOException {
+ Thread[] thread = new Thread[randomIntBetween(3, 5)];
+ int numDocs = randomIntBetween(1000, 10000);
+ assertEquals(0, engine.getNumVersionLookups());
+ assertEquals(0, engine.getNumIndexVersionsLookups());
+ List<Engine.Index> docs = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ final ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, i, -1, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ Engine.Index index = randomAppendOnly(i, doc, false);
+ docs.add(index);
+ }
+ Collections.shuffle(docs, random());
+ CountDownLatch startGun = new CountDownLatch(thread.length);
+ AtomicInteger offset = new AtomicInteger(-1);
+ for (int i = 0; i < thread.length; i++) {
+ thread[i] = new Thread() {
+ @Override
+ public void run() {
+ startGun.countDown();
+ try {
+ startGun.await();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ int docOffset;
+ while ((docOffset = offset.incrementAndGet()) < docs.size()) {
+ engine.index(docs.get(docOffset));
+ }
+ }
+ };
+ thread[i].start();
+ }
+ for (int i = 0; i < thread.length; i++) {
+ thread[i].join();
+ }
+
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertEquals(docs.size(), topDocs.totalHits);
+ }
+ assertEquals(0, engine.getNumVersionLookups());
+ assertEquals(0, engine.getNumIndexVersionsLookups());
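+ // append-only ops are plain adds, so the writer must not have recorded any deletes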
+ assertFalse(engine.indexWriterHasDeletions());
+ }
+
+ public static long getNumVersionLookups(InternalEngine engine) { // for other tests to access this
+ return engine.getNumVersionLookups();
+ }
+
+ public static long getNumIndexVersionsLookups(InternalEngine engine) { // for other tests to access this
+ return engine.getNumIndexVersionsLookups();
+ }
+
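+ // a refresh listener that throws a fake IOException must fail and close the engine; a
+ // concurrent refresh may then observe either the original cause or an already-closed error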
+ public void testFailEngineOnRandomIO() throws IOException, InterruptedException {
+ MockDirectoryWrapper wrapper = newMockDirectory();
+ final Path translogPath = createTempDir("testFailEngineOnRandomIO");
+ try (Store store = createStore(wrapper)) {
+ CyclicBarrier join = new CyclicBarrier(2);
+ CountDownLatch start = new CountDownLatch(1);
+ AtomicInteger controller = new AtomicInteger(0);
+ EngineConfig config = config(defaultSettings, store, translogPath, newMergePolicy(),
+ IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, new ReferenceManager.RefreshListener() {
+ @Override
+ public void beforeRefresh() throws IOException {
+ }
+
+ @Override
+ public void afterRefresh(boolean didRefresh) throws IOException {
+ int i = controller.incrementAndGet();
+ if (i == 1) {
+ throw new MockDirectoryWrapper.FakeIOException();
+ } else if (i == 2) {
+ try {
+ start.await();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ throw new AlreadyClosedException("boom");
+ }
+ }
+ });
+ InternalEngine internalEngine = new InternalEngine(config);
+ int docId = 0;
+ final ParsedDocument doc = testParsedDocument(Integer.toString(docId), Integer.toString(docId), "test", null, docId, -1,
+ testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+
+ Engine.Index index = randomAppendOnly(docId, doc, false);
+ internalEngine.index(index);
+ Runnable r = () -> {
+ try {
+ join.await();
+ } catch (Exception e) {
+ throw new AssertionError(e);
+ }
+ try {
+ internalEngine.refresh("test");
+ fail();
+ } catch (EngineClosedException ex) {
+ // we can't guarantee that we enter the refresh call before the engine is fully
+ // closed, so we also expect an EngineClosedException (ECE) here
+ assertTrue(ex.toString(), ex.getCause() instanceof MockDirectoryWrapper.FakeIOException);
+ } catch (RefreshFailedEngineException | AlreadyClosedException ex) {
+ // fine
+ } finally {
+ start.countDown();
+ }
+ };
+ Thread t = new Thread(r);
+ Thread t1 = new Thread(r);
+ t.start();
+ t1.start();
+ t.join();
+ t1.join();
+ assertTrue(internalEngine.isClosed.get());
+ assertTrue(internalEngine.failedEngine.get() instanceof MockDirectoryWrapper.FakeIOException);
+ }
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java
new file mode 100644
index 0000000000..ed80b98c7f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.RamUsageTester;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.test.ESTestCase;
+
+public class LiveVersionMapTests extends ESTestCase {
+
+ public void testRamBytesUsed() throws Exception {
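+ // the map's ramBytesUsed() estimate should stay within 25% of the size measured by
+ // RamUsageTester, both before and after a refresh cycle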
+ LiveVersionMap map = new LiveVersionMap();
+ for (int i = 0; i < 100000; ++i) {
+ BytesRefBuilder uid = new BytesRefBuilder();
+ uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
+ VersionValue version = new VersionValue(randomLong());
+ map.putUnderLock(uid.toBytesRef(), version);
+ }
+ long actualRamBytesUsed = RamUsageTester.sizeOf(map);
+ long estimatedRamBytesUsed = map.ramBytesUsed();
+ // less than 25% off
+ assertEquals(actualRamBytesUsed, estimatedRamBytesUsed, actualRamBytesUsed / 4);
+
+ // now refresh
+ map.beforeRefresh();
+ map.afterRefresh(true);
+
+ for (int i = 0; i < 100000; ++i) {
+ BytesRefBuilder uid = new BytesRefBuilder();
+ uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
+ VersionValue version = new VersionValue(randomLong());
+ map.putUnderLock(uid.toBytesRef(), version);
+ }
+ actualRamBytesUsed = RamUsageTester.sizeOf(map);
+ estimatedRamBytesUsed = map.ramBytesUsed();
+ // less than 25% off
+ assertEquals(actualRamBytesUsed, estimatedRamBytesUsed, actualRamBytesUsed / 4);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
index 5083ddbd1d..6dea774f25 100644
--- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
@@ -38,6 +38,7 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
@@ -88,9 +89,6 @@ import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
-/**
- * TODO: document me!
- */
public class ShadowEngineTests extends ESTestCase {
protected final ShardId shardId = new ShardId("index", "_na_", 1);
@@ -250,7 +248,7 @@ public class ShadowEngineTests extends ESTestCase {
EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(),
mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, null,
IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
- TimeValue.timeValueMinutes(5), refreshListeners);
+ TimeValue.timeValueMinutes(5), refreshListeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
return config;
}
@@ -612,8 +610,7 @@ public class ShadowEngineTests extends ESTestCase {
// but, we can still get it (in realtime)
Engine.GetResult getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
assertThat(getResult.exists(), equalTo(true));
- assertThat(getResult.source().source, equalTo(B_1));
- assertThat(getResult.docIdAndVersion(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
// can't get it from the replica, because it's not in the translog for a shadow replica
@@ -623,10 +620,8 @@ public class ShadowEngineTests extends ESTestCase {
- // but, not there non realtime
+ // and the non-realtime get now finds it as well
getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
- assertThat(getResult.exists(), equalTo(false));
+ assertThat(getResult.exists(), equalTo(true));
getResult.release();
- // refresh and it should be there
- primaryEngine.refresh("test");
// now its there...
searchResult = primaryEngine.acquireSearcher("test");
@@ -663,8 +658,7 @@ public class ShadowEngineTests extends ESTestCase {
// but, we can still get it (in realtime)
getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
assertThat(getResult.exists(), equalTo(true));
- assertThat(getResult.source().source, equalTo(B_2));
- assertThat(getResult.docIdAndVersion(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
// refresh and it should be updated
@@ -747,7 +741,6 @@ public class ShadowEngineTests extends ESTestCase {
// and, verify get (in real time)
getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
assertThat(getResult.exists(), equalTo(true));
- assertThat(getResult.source(), nullValue());
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -760,7 +753,6 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
assertThat(getResult.exists(), equalTo(true));
- assertThat(getResult.source(), nullValue());
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -995,7 +987,7 @@ public class ShadowEngineTests extends ESTestCase {
final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime());
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
primaryEngine.index(firstIndexRequest);
assertThat(firstIndexRequest.version(), equalTo(1L));
}
diff --git a/core/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java b/core/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java
index d9371707e3..e66f55ff67 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java
@@ -19,27 +19,21 @@
package org.elasticsearch.index.engine;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.rest.RestStatus;
+import org.apache.lucene.util.RamUsageTester;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogTests;
+import org.elasticsearch.test.ESTestCase;
-import java.io.IOException;
+public class VersionValueTests extends ESTestCase {
-/**
- *
- */
-public class FlushNotAllowedEngineException extends EngineException {
-
- public FlushNotAllowedEngineException(ShardId shardId, String msg) {
- super(shardId, msg);
+ public void testRamBytesUsed() {
+ VersionValue versionValue = new VersionValue(randomLong());
+ assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed());
}
- public FlushNotAllowedEngineException(StreamInput in) throws IOException{
- super(in);
+ public void testDeleteRamBytesUsed() {
+ DeleteVersionValue versionValue = new DeleteVersionValue(randomLong(), randomLong());
+ assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed());
}
- @Override
- public RestStatus status() {
- return RestStatus.SERVICE_UNAVAILABLE;
- }
}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java
index 96c70b3f55..1fa7272d2d 100644
--- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java
@@ -42,7 +42,7 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.mapper.BinaryFieldMapper;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
+import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
import org.elasticsearch.index.mapper.LegacyByteFieldMapper;
import org.elasticsearch.index.mapper.LegacyDoubleFieldMapper;
import org.elasticsearch.index.mapper.LegacyFloatFieldMapper;
@@ -119,7 +119,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
fieldType = new LegacyByteFieldMapper.Builder(fieldName).docValues(docValues).build(context).fieldType();
} else if (type.equals("geo_point")) {
if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) {
- fieldType = new GeoPointFieldMapperLegacy.Builder(fieldName).docValues(docValues).build(context).fieldType();
+ fieldType = new LegacyGeoPointFieldMapper.Builder(fieldName).docValues(docValues).build(context).fieldType();
} else {
fieldType = new GeoPointFieldMapper.Builder(fieldName).docValues(docValues).build(context).fieldType();
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java
index ace711ad38..044f74e271 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java
@@ -20,22 +20,31 @@
package org.elasticsearch.index.mapper;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.BinaryFieldMapper;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Collection;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
@@ -43,6 +52,11 @@ import static org.hamcrest.Matchers.instanceOf;
*/
public class BinaryFieldMapperTests extends ESSingleNodeTestCase {
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void testDefaultMapping() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties")
@@ -92,4 +106,23 @@ public class BinaryFieldMapperTests extends ESSingleNodeTestCase {
assertEquals(new BytesArray(value), originalValue);
}
}
+
+ public void testEmptyName() throws IOException {
+ // 5.x and later: empty field names are rejected
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", "binary").endObject().endObject()
+ .endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // indices created before 5.x still accept empty field names
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+
+ DocumentMapper defaultMapper = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java
index 86eb1ef961..b07f3b43ff 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java
@@ -47,11 +47,15 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.io.IOException;
import java.util.Collection;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
+import static org.hamcrest.Matchers.containsString;
+
public class BooleanFieldMapperTests extends ESSingleNodeTestCase {
IndexService indexService;
@@ -214,4 +218,25 @@ public class BooleanFieldMapperTests extends ESSingleNodeTestCase {
assertEquals(DocValuesType.NONE, LegacyStringMappingTests.docValuesType(doc, "bool2"));
assertEquals(DocValuesType.SORTED_NUMERIC, LegacyStringMappingTests.docValuesType(doc, "bool3"));
}
+
+ public void testEmptyName() throws IOException {
+ // 5.x and later: empty field names are rejected
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", "boolean").endObject().endObject()
+ .endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // indices created before 5.x still accept empty field names
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ parser = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapper2xTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapper2xTests.java
index 601909d8e0..a44941a19d 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapper2xTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapper2xTests.java
@@ -23,10 +23,13 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.CompletionFieldMapper2x;
import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -39,6 +42,7 @@ import java.util.Map;
import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
@@ -134,4 +138,19 @@ public class CompletionFieldMapper2xTests extends ESSingleNodeTestCase {
assertThat(configMap.get("analyzer").toString(), is("simple"));
}
+ public void testEmptyName() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", "completion").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test",
+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, PRE2X_VERSION.id).build())
+ .mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
+
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("");
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper2x.class));
+
+ CompletionFieldMapper2x completionFieldMapper = (CompletionFieldMapper2x) fieldMapper;
+ assertThat(completionFieldMapper.isStoringPayloads(), is(false));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
index 69447d24fd..78f9f355b0 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
@@ -35,13 +35,8 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
-import org.elasticsearch.index.mapper.CompletionFieldMapper;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
@@ -283,7 +278,7 @@ public class CompletionFieldMapperTests extends ESSingleNodeTestCase {
.field("weight", 4)
.endObject()
.startObject()
- .field("input", "suggestion4", "suggestion5", "suggestion6")
+ .array("input", "suggestion4", "suggestion5", "suggestion6")
.field("weight", 5)
.endObject()
.endArray()
@@ -427,4 +422,17 @@ public class CompletionFieldMapperTests extends ESSingleNodeTestCase {
}
assertThat(actualFieldCount, equalTo(expected));
}
+
+ public void testEmptyName() throws IOException {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", "completion").endObject().endObject()
+ .endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
index e71ac5b492..cd08ba98a8 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
@@ -28,16 +28,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.LegacyLongFieldMapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.util.Arrays;
@@ -46,7 +37,6 @@ import java.util.Map;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.startsWith;
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
index f0e4e8c15a..a80f94845d 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
@@ -21,18 +21,29 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.io.IOException;
+import java.util.Collection;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
public class DateFieldMapperTests extends ESSingleNodeTestCase {
@@ -46,6 +57,11 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase {
parser = indexService.mapperService().documentMapperParser();
}
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void testDefaults() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "date").endObject().endObject()
@@ -317,4 +333,25 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase {
Exception e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping)));
assertEquals("[format] must not have a [null] value", e.getMessage());
}
+
+ public void testEmptyName() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", "date")
+ .field("format", "epoch_second").endObject().endObject()
+ .endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // indices created before 5.x still accept empty field names
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ parser = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().toString());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
index 3320b90a99..55f6b0b52c 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
@@ -19,19 +19,30 @@
package org.elasticsearch.index.mapper;
+import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;
+import org.apache.lucene.index.IndexableField;
import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.lucene.all.AllField;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
@@ -42,6 +53,11 @@ import static org.hamcrest.Matchers.instanceOf;
// TODO: make this a real unit test
public class DocumentParserTests extends ESSingleNodeTestCase {
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void testTypeDisabled() throws Exception {
DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
@@ -177,7 +193,8 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
// creates an object mapper, which is about 100x harder than it should be....
ObjectMapper createObjectMapper(MapperService mapperService, String name) throws Exception {
- ParseContext context = new ParseContext.InternalParseContext(Settings.EMPTY,
+ ParseContext context = new ParseContext.InternalParseContext(
+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(),
mapperService.documentMapperParser(), mapperService.documentMapper("type"), null, null);
String[] nameParts = name.split("\\.");
for (int i = 0; i < nameParts.length - 1; ++i) {
@@ -276,7 +293,7 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startArray("dynamic_templates").startObject().startObject("georule")
.field("match", "foo*")
- .startObject("mapping").field("type", "geo_point").endObject()
+ .startObject("mapping").field("type", "geo_point").field("doc_values", false).endObject()
.endObject().endObject().endArray().endObject().endObject().string();
DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -356,7 +373,7 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
public void testMappedGeoPointArray() throws Exception {
DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("foo").field("type", "geo_point")
+ .startObject("properties").startObject("foo").field("type", "geo_point").field("doc_values", false)
.endObject().endObject().endObject().endObject().string();
DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -1154,4 +1171,48 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
}
+
+ public void testIncludeInAllPropagation() throws IOException {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .field("dynamic", "strict")
+ .startObject("properties")
+ .startObject("a")
+ .field("type", "keyword")
+ .endObject()
+ .startObject("o")
+ .field("include_in_all", false)
+ .startObject("properties")
+ .startObject("a")
+ .field("type", "keyword")
+ .endObject()
+ .startObject("o")
+ .field("include_in_all", true)
+ .startObject("properties")
+ .startObject("a")
+ .field("type", "keyword")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping));
+ ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("a", "b")
+ .startObject("o")
+ .field("a", "c")
+ .startObject("o")
+ .field("a", "d")
+ .endObject()
+ .endObject()
+ .endObject().bytes());
+ Set<String> values = new HashSet<>();
+ for (IndexableField f : doc.rootDoc().getFields("_all")) {
+ values.add(f.stringValue());
+ }
+ assertEquals(new HashSet<>(Arrays.asList("b", "d")), values);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java
index 0cd6f93ba3..d634d8cd4f 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java
@@ -29,6 +29,7 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndicesService;
@@ -76,12 +77,14 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase {
clusterService = createClusterService(THREAD_POOL);
transport = new LocalTransport(settings, THREAD_POOL, new NamedWriteableRegistry(Collections.emptyList()),
new NoneCircuitBreakerService());
- transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
+ transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
indicesService = getInstanceFromNode(IndicesService.class);
shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, THREAD_POOL);
actionFilters = new ActionFilters(Collections.emptySet());
indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
- autoCreateIndex = new AutoCreateIndex(settings, indexNameExpressionResolver);
+ autoCreateIndex = new AutoCreateIndex(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+ indexNameExpressionResolver);
}
@After
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java
index f1541ed02b..6be11ced1e 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java
@@ -19,20 +19,16 @@
package org.elasticsearch.index.mapper;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.KeywordFieldMapper;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -44,6 +40,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
@@ -65,7 +62,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
Collections.singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser()));
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
- indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+ indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject(ExternalMetadataMapper.CONTENT_TYPE)
@@ -88,8 +85,10 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getField("field.point"), notNullValue());
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0)));
+ } else if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ GeoPoint point = new GeoPoint().resetFromIndexableField(doc.rootDoc().getField("field.point"));
+ assertThat(point.lat(), closeTo(42.0, 1e-5));
+ assertThat(point.lon(), closeTo(51.0, 1e-5));
}
assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
@@ -112,7 +111,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap());
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
- indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+ indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
@@ -146,17 +145,29 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getField("field.point"), notNullValue());
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0)));
+ } else {
+ GeoPoint point = new GeoPoint().resetFromIndexableField(doc.rootDoc().getField("field.point"));
+ assertThat(point.lat(), closeTo(42.0, 1E-5));
+ assertThat(point.lon(), closeTo(51.0, 1E-5));
}
- assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
+ IndexableField shape = doc.rootDoc().getField("field.shape");
+ assertThat(shape, notNullValue());
- assertThat(doc.rootDoc().getField("field.field"), notNullValue());
- assertThat(doc.rootDoc().getField("field.field").stringValue(), is("foo"));
+ IndexableField field = doc.rootDoc().getField("field.field");
+ assertThat(field, notNullValue());
+ assertThat(field.stringValue(), is("foo"));
- assertThat(doc.rootDoc().getField("field.field.raw"), notNullValue());
- assertThat(doc.rootDoc().getField("field.field.raw").binaryValue(), is(new BytesRef("foo")));
+ IndexableField raw = doc.rootDoc().getField("field.field.raw");
+
+ assertThat(raw, notNullValue());
+ if (version.before(Version.V_5_0_0_alpha1)) {
+ assertThat(raw.stringValue(), is("foo"));
+ } else {
+ assertThat(raw.binaryValue(), is(new BytesRef("foo")));
+ }
}
public void testExternalValuesWithMultifieldTwoLevels() throws Exception {
@@ -170,7 +181,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap());
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
- indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+ indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
@@ -208,7 +219,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getField("field.point"), notNullValue());
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0)));
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java
index 7d44b1fc48..1efae7ccb2 100755
--- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java
@@ -27,19 +27,6 @@ import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
-import org.elasticsearch.index.mapper.BinaryFieldMapper;
-import org.elasticsearch.index.mapper.BooleanFieldMapper;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.GeoPointFieldMapper;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
-import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.TermBasedFieldType;
-import org.elasticsearch.index.mapper.TextFieldMapper;
import java.io.IOException;
import java.nio.charset.Charset;
@@ -71,7 +58,8 @@ public class ExternalMapper extends FieldMapper {
private BinaryFieldMapper.Builder binBuilder = new BinaryFieldMapper.Builder(Names.FIELD_BIN);
private BooleanFieldMapper.Builder boolBuilder = new BooleanFieldMapper.Builder(Names.FIELD_BOOL);
private GeoPointFieldMapper.Builder pointBuilder = new GeoPointFieldMapper.Builder(Names.FIELD_POINT);
- private GeoPointFieldMapperLegacy.Builder legacyPointBuilder = new GeoPointFieldMapperLegacy.Builder(Names.FIELD_POINT);
+ private LegacyGeoPointFieldMapper.Builder legacyPointBuilder = new LegacyGeoPointFieldMapper.Builder(Names.FIELD_POINT);
+ private LatLonPointFieldMapper.Builder latLonPointBuilder = new LatLonPointFieldMapper.Builder(Names.FIELD_POINT);
private GeoShapeFieldMapper.Builder shapeBuilder = new GeoShapeFieldMapper.Builder(Names.FIELD_SHAPE);
private Mapper.Builder stringBuilder;
private String generatedValue;
@@ -95,8 +83,14 @@ public class ExternalMapper extends FieldMapper {
context.path().add(name);
BinaryFieldMapper binMapper = binBuilder.build(context);
BooleanFieldMapper boolMapper = boolBuilder.build(context);
- BaseGeoPointFieldMapper pointMapper = (context.indexCreatedVersion().before(Version.V_2_2_0)) ?
- legacyPointBuilder.build(context) : pointBuilder.build(context);
+ BaseGeoPointFieldMapper pointMapper;
+ if (context.indexCreatedVersion().before(Version.V_2_2_0)) {
+ pointMapper = legacyPointBuilder.build(context);
+ } else if (context.indexCreatedVersion().onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ pointMapper = latLonPointBuilder.build(context);
+ } else {
+ pointMapper = pointBuilder.build(context);
+ }
GeoShapeFieldMapper shapeMapper = shapeBuilder.build(context);
FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context);
context.path().remove();
@@ -211,7 +205,7 @@ public class ExternalMapper extends FieldMapper {
MultiFields multiFieldsUpdate = multiFields.updateFieldType(fullNameToFieldType);
BinaryFieldMapper binMapperUpdate = (BinaryFieldMapper) binMapper.updateFieldType(fullNameToFieldType);
BooleanFieldMapper boolMapperUpdate = (BooleanFieldMapper) boolMapper.updateFieldType(fullNameToFieldType);
- GeoPointFieldMapper pointMapperUpdate = (GeoPointFieldMapper) pointMapper.updateFieldType(fullNameToFieldType);
+ BaseGeoPointFieldMapper pointMapperUpdate = (BaseGeoPointFieldMapper) pointMapper.updateFieldType(fullNameToFieldType);
GeoShapeFieldMapper shapeMapperUpdate = (GeoShapeFieldMapper) shapeMapper.updateFieldType(fullNameToFieldType);
TextFieldMapper stringMapperUpdate = (TextFieldMapper) stringMapper.updateFieldType(fullNameToFieldType);
if (update == this
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java
index 342fa247d7..c75871e43b 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java
@@ -112,7 +112,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase {
assertThat(response.getHits().totalHits(), equalTo((long) 1));
response = client().prepareSearch("test-idx")
- .setPostFilter(QueryBuilders.geoDistanceRangeQuery("field.point", 42.0, 51.0).to("1km"))
+ .setPostFilter(QueryBuilders.geoDistanceQuery("field.point").point(42.0, 51.0).distance("1km"))
.execute().actionGet();
assertThat(response.getHits().totalHits(), equalTo((long) 1));
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java
index baa9f72801..544764a9b5 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java
@@ -24,24 +24,11 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MetadataFieldMapper;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.mapper.MapperRegistry;
-import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
@@ -244,9 +231,9 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
Collections.singletonMap("_dummy", new DummyMetadataFieldMapper.TypeParser())
);
final MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
- MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+ MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService,
- indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+ indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
ParsedDocument parsedDocument = mapper.parse("index", "type", "id", new BytesArray("{}"));
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoEncodingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoEncodingTests.java
index 63377ca963..4840dcc71a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/GeoEncodingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoEncodingTests.java
@@ -23,7 +23,6 @@ import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.unit.DistanceUnit.Distance;
-import org.elasticsearch.index.mapper.GeoPointFieldMapperLegacy;
import org.elasticsearch.test.ESTestCase;
import java.util.Arrays;
@@ -38,7 +37,7 @@ public class GeoEncodingTests extends ESTestCase {
final double lat = randomDouble() * 180 - 90;
final double lon = randomDouble() * 360 - 180;
final Distance precision = new Distance(1+(randomDouble() * 9), randomFrom(Arrays.asList(DistanceUnit.MILLIMETERS, DistanceUnit.METERS, DistanceUnit.KILOMETERS)));
- final GeoPointFieldMapperLegacy.Encoding encoding = GeoPointFieldMapperLegacy.Encoding.of(precision);
+ final LegacyGeoPointFieldMapper.Encoding encoding = LegacyGeoPointFieldMapper.Encoding.of(precision);
assertThat(encoding.precision().convert(DistanceUnit.METERS).value, lessThanOrEqualTo(precision.convert(DistanceUnit.METERS).value));
final GeoPoint geoPoint = encoding.decode(encoding.encodeCoordinate(lat), encoding.encodeCoordinate(lon), new GeoPoint());
final double error = GeoDistance.PLANE.calculate(lat, lon, geoPoint.lat(), geoPoint.lon(), DistanceUnit.METERS);
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java
index 9262c6d0d6..7c4acb4403 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java
@@ -18,7 +18,6 @@
*/
package org.elasticsearch.index.mapper;
-import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
@@ -31,23 +30,20 @@ import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.geo.RandomGeoGenerator;
+import org.hamcrest.CoreMatchers;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.lang.NumberFormatException;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -66,12 +62,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
return pluginList(InternalSettingsPlugin.class);
}
- public void testLatLonValues() throws Exception {
+ public void testLegacyLatLonValues() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
.endObject().endObject().string();
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -95,13 +91,13 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
}
- public void testLatLonValuesWithGeohash() throws Exception {
+ public void testLegacyLatLonValuesWithGeohash() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
.field("geohash", true).endObject().endObject()
.endObject().endObject().string();
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -120,12 +116,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
}
- public void testLatLonInOneValueWithGeohash() throws Exception {
+ public void testLegacyLatLonInOneValueWithGeohash() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
.field("geohash", true).endObject().endObject().endObject().endObject().string();
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -144,12 +140,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
}
- public void testGeoHashIndexValue() throws Exception {
+ public void testLegacyGeoHashIndexValue() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
.field("geohash", true).endObject().endObject().endObject().endObject().string();
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -169,11 +165,13 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
public void testGeoHashValue() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
- .endObject().endObject().endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -183,13 +181,15 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
- assertThat(doc.rootDoc().get("point"), notNullValue());
+ assertThat(doc.rootDoc().getField("point"), notNullValue());
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ }
}
- public void testNormalizeLatLonValuesDefault() throws Exception {
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ public void testNormalizeLegacyLatLonValuesDefault() throws Exception {
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
// default to normalize
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point");
@@ -238,8 +238,8 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
}
- public void testValidateLatLonValues() throws Exception {
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ public void testLegacyValidateLatLonValues() throws Exception {
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true);
if (version.before(Version.V_2_2_0)) {
@@ -337,8 +337,8 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
}
- public void testNoValidateLatLonValues() throws Exception {
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ public void testNoValidateLegacyLatLonValues() throws Exception {
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true);
if (version.before(Version.V_2_2_0)) {
@@ -399,11 +399,13 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
public void testLatLonValuesStored() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
- .field("store", true).endObject().endObject().endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
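+ // note: this range always resolves to Version.CURRENT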
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -413,24 +415,29 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
- assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
- assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ assertThat(doc.rootDoc().getField("point"), notNullValue());
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
+ if (version.before(Version.V_2_2_0)) {
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ } else {
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ }
}
}
public void testArrayLatLonValues() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
- .field("store", true).endObject().endObject().endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("doc_values", false);
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -443,28 +450,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- if (version.onOrAfter(Version.V_5_0_0_alpha2)) {
- assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(4));
- assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(4));
-
- // point field for 1st value
- assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
- assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
- // stored field for 1st value
- assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.2));
- assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.3));
- // indexed hash
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
-
- // point field for 2nd value
- assertThat(doc.rootDoc().getFields("point.lat")[2].numericValue().doubleValue(), equalTo(1.4));
- assertThat(doc.rootDoc().getFields("point.lon")[2].numericValue().doubleValue(), equalTo(1.5));
- // stored field for 2nd value
- assertThat(doc.rootDoc().getFields("point.lat")[3].numericValue().doubleValue(), equalTo(1.4));
- assertThat(doc.rootDoc().getFields("point.lon")[3].numericValue().doubleValue(), equalTo(1.5));
- // indexed hash
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
+ // doc values are enabled by default but disabled in this test's mapping, so the two points only produce indexed and stored fields
+ assertThat(doc.rootDoc().getFields("point"), notNullValue());
+ if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getFields("point").length, equalTo(4));
} else {
+ assertThat(doc.rootDoc().getFields("point").length, equalTo(2));
+ }
+ if (version.before(Version.V_5_0_0_alpha2)) {
assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
@@ -485,13 +478,16 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
public void testLatLonInOneValue() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
- .endObject().endObject().endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
+ DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type",
+ new CompressedXContent(mapping));
ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
@@ -499,49 +495,61 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ assertThat(doc.rootDoc().getField("point"), notNullValue());
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ if (version.before(Version.V_2_2_0)) {
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ } else {
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ }
}
}
public void testLatLonInOneValueStored() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", true).endObject().endObject()
- .endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
+ DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type",
+ new CompressedXContent(mapping));
ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("point", "1.2,1.3")
.endObject()
.bytes());
-
- assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
- assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ assertThat(doc.rootDoc().getField("point"), notNullValue());
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ if (version.before(Version.V_2_2_0)) {
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ } else {
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()),
+ equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ }
}
}
public void testLatLonInOneValueArray() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
- .field("store", true).endObject().endObject().endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("doc_values", false);
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
+ DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type",
+ new CompressedXContent(mapping));
ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
@@ -552,41 +560,39 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
+ // doc values are enabled by default but disabled in this test's mapping, so the two points only produce indexed and stored fields
+ assertThat(doc.rootDoc().getFields("point"), notNullValue());
if (version.before(Version.V_5_0_0_alpha2)) {
+ assertThat(doc.rootDoc().getFields("point").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
- } else {
- IndexableField[] latPoints = doc.rootDoc().getFields("point.lat");
- IndexableField[] lonPoints = doc.rootDoc().getFields("point.lon");
- assertThat(latPoints.length, equalTo(4));
- assertThat(lonPoints.length, equalTo(4));
- assertThat(latPoints[0].numericValue().doubleValue(), equalTo(1.2));
- assertThat(lonPoints[0].numericValue().doubleValue(), equalTo(1.3));
- assertThat(latPoints[2].numericValue().doubleValue(), equalTo(1.4));
- assertThat(lonPoints[2].numericValue().doubleValue(), equalTo(1.5));
+ } else if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getFields("point").length, equalTo(4));
}
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
}
}
public void testLonLatArray() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
- .endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -596,22 +602,27 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ assertThat(doc.rootDoc().getField("point"), notNullValue());
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ if (version.before(Version.V_2_2_0)) {
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ } else {
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ }
}
}
public void testLonLatArrayDynamic() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startArray("dynamic_templates").startObject()
- .startObject("point").field("match", "point*").startObject("mapping").field("type", "geo_point")
- .field("lat_lon", true).endObject().endObject().endObject().endArray().endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
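+ // note: this range always resolves to Version.CURRENT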
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startArray("dynamic_templates").startObject().startObject("point").field("match", "point*")
+ .startObject("mapping").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.endObject().endObject().endObject().endArray().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -621,21 +632,26 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ assertThat(doc.rootDoc().getField("point"), notNullValue());
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ if (version.before(Version.V_2_2_0)) {
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ } else {
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ }
}
}
public void testLonLatArrayStored() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
- .field("store", true).endObject().endObject().endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -645,23 +661,31 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
- assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
- assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getField("point"), notNullValue());
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ if (version.before(Version.V_2_2_0)) {
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ } else {
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
+ }
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
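+ // store is enabled and doc values are on by default, so a single point yields three "point" entries (indexed, stored, doc values)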
+ assertThat(doc.rootDoc().getFields("point").length, equalTo(3));
}
}
public void testLonLatArrayArrayStored() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
- .field("store", true).endObject().endObject().endObject().endObject().string();
-
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true);
+ }
+ String mapping = xContentBuilder.field("store", true).field("doc_values", false).endObject().endObject()
+ .endObject().endObject().string();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -674,7 +698,9 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
+ assertThat(doc.rootDoc().getFields("point"), notNullValue());
if (version.before(Version.V_5_0_0_alpha2)) {
+ assertThat(doc.rootDoc().getFields("point").length, CoreMatchers.equalTo(2));
assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
@@ -691,19 +717,8 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
} else {
assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
}
- } else {
- assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(4));
- assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(4));
- assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
- assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.2));
- assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
- assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.3));
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
- assertThat(doc.rootDoc().getFields("point.lat")[2].numericValue().doubleValue(), equalTo(1.4));
- assertThat(doc.rootDoc().getFields("point.lat")[3].numericValue().doubleValue(), equalTo(1.4));
- assertThat(doc.rootDoc().getFields("point.lon")[2].numericValue().doubleValue(), equalTo(1.5));
- assertThat(doc.rootDoc().getFields("point.lon")[3].numericValue().doubleValue(), equalTo(1.5));
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
+ } else if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(doc.rootDoc().getFields("point").length, CoreMatchers.equalTo(4));
}
}
@@ -716,11 +731,35 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser();
// test deprecation exceptions on newly created indexes
- try {
- String validateMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
- .field("validate", true).endObject().endObject()
+ if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ try {
+ String latLonMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
.endObject().endObject().string();
+ parser.parse("type", new CompressedXContent(latLonMapping));
+ fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+ } catch (MapperParsingException e) {
+ assertEquals("Mapping definition for [point] has unsupported parameters: [lat_lon : true]", e.getMessage());
+ }
+ }
+
+ if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ try {
+ String geoHashMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+ parser.parse("type", new CompressedXContent(geoHashMapping));
+ fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+ } catch (MapperParsingException e) {
+ assertEquals("Mapping definition for [point] has unsupported parameters: [geohash : true]", e.getMessage());
+ }
+ }
+
+ try {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
+ }
+ String validateMapping = xContentBuilder.field("validate", true).endObject().endObject().endObject().endObject().string();
parser.parse("type", new CompressedXContent(validateMapping));
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
} catch (MapperParsingException e) {
@@ -728,10 +767,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
try {
- String validateMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
- .field("validate_lat", true).endObject().endObject()
- .endObject().endObject().string();
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
+ }
+ String validateMapping = xContentBuilder.field("validate_lat", true).endObject().endObject().endObject().endObject().string();
parser.parse("type", new CompressedXContent(validateMapping));
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
} catch (MapperParsingException e) {
@@ -739,10 +780,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
try {
- String validateMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
- .field("validate_lon", true).endObject().endObject()
- .endObject().endObject().string();
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
+ }
+ String validateMapping = xContentBuilder.field("validate_lon", true).endObject().endObject().endObject().endObject().string();
parser.parse("type", new CompressedXContent(validateMapping));
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
} catch (MapperParsingException e) {
@@ -751,10 +794,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
// test deprecated normalize
try {
- String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
- .field("normalize", true).endObject().endObject()
- .endObject().endObject().string();
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
+ }
+ String normalizeMapping = xContentBuilder.field("normalize", true).endObject().endObject().endObject().endObject().string();
parser.parse("type", new CompressedXContent(normalizeMapping));
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
} catch (MapperParsingException e) {
@@ -762,10 +807,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
try {
- String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
- .field("normalize_lat", true).endObject().endObject()
- .endObject().endObject().string();
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
+ }
+ String normalizeMapping = xContentBuilder.field("normalize_lat", true).endObject().endObject().endObject().endObject().string();
parser.parse("type", new CompressedXContent(normalizeMapping));
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
} catch (MapperParsingException e) {
@@ -773,10 +820,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
try {
- String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
- .field("normalize_lon", true).endObject().endObject()
- .endObject().endObject().string();
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point");
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
+ }
+ String normalizeMapping = xContentBuilder.field("normalize_lon", true).endObject().endObject().endObject().endObject().string();
parser.parse("type", new CompressedXContent(normalizeMapping));
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
} catch (MapperParsingException e) {
@@ -784,8 +833,8 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
}
}
- public void testGeoPointMapperMerge() throws Exception {
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ public void testLegacyGeoPointMapperMerge() throws Exception {
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
String stage1Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
@@ -811,7 +860,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
}
- public void testGeoHashSearch() throws Exception {
+ public void testLegacyGeoHashSearch() throws Exception {
// create a geo_point mapping with geohash enabled and random (between 1 and 12) geohash precision
int precision = randomIntBetween(1, 12);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location")
@@ -819,7 +868,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().endObject().string();
// create index and add a test point (dr5regy6rc6z)
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha1);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test").setSettings(settings)
.addMapping("pin", mapping);
@@ -836,7 +885,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
assertEquals("dr5regy6rc6y".substring(0, precision), m.get("location.geohash").value());
}
- public void testGeoHashSearchWithPrefix() throws Exception {
+ public void testLegacyGeoHashSearchWithPrefix() throws Exception {
// create a geo_point mapping with geohash enabled and random (between 1 and 12) geohash precision
int precision = randomIntBetween(1, 12);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location")
@@ -844,7 +893,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().endObject().endObject().string();
// create index and add a test point (dr5regy6rc6z)
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test").setSettings(settings)
.addMapping("pin", mapping);
@@ -869,9 +918,11 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
public void testMultiField() throws Exception {
int numDocs = randomIntBetween(10, 100);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location")
- .field("type", "geo_point").startObject("fields")
- .startObject("geohash").field("type", "geo_point").field("geohash_precision", 12).field("geohash_prefix", true).endObject()
- .startObject("latlon").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .field("type", "geo_point")
+ .startObject("fields")
+ .startObject("geohash").field("type", "keyword").endObject() // test geohash as keyword
+ .startObject("latlon").field("type", "string").endObject() // test geohash as string
+ .endObject()
.endObject().endObject().endObject().endObject().string();
CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test")
.addMapping("pin", mapping);
@@ -885,6 +936,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
.field("lon", pt.lon()).endObject().endObject()).setRefreshPolicy(IMMEDIATE).get();
}
+ // TODO these tests are bogus and need to be fixed
// query by geohash subfield
SearchResponse searchResponse = client().prepareSearch().addStoredField("location.geohash").setQuery(matchAllQuery()).execute().actionGet();
assertEquals(numDocs, searchResponse.getHits().totalHits());
@@ -893,4 +945,29 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
searchResponse = client().prepareSearch().addStoredField("location.latlon").setQuery(matchAllQuery()).execute().actionGet();
assertEquals(numDocs, searchResponse.getHits().totalHits());
}
+
+ public void testEmptyName() throws Exception {
+ // after 5.x
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", "geo_point").endObject().endObject()
+ .endObject().endObject().string();
+
+ Version version = Version.CURRENT;
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
+ DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // before 5.x
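+ // (2.x mappings accepted an empty geo_point field name, so parsing succeeds and the mapping round-trips)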
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ DocumentMapperParser parser2x = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser2x.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java
index 6e6222ac87..fac30002fb 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java
@@ -18,16 +18,13 @@
*/
package org.elasticsearch.index.mapper;
-import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
-import org.elasticsearch.index.mapper.LegacyDoubleFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.StringFieldMapper;
+import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper.LegacyGeoPointFieldType;
import org.junit.Before;
public class GeoPointFieldTypeTests extends FieldTypeTestCase {
@Override
protected MappedFieldType createDefaultFieldType() {
- return new BaseGeoPointFieldMapper.GeoPointFieldType();
+ return new LegacyGeoPointFieldType();
}
@Before
@@ -35,13 +32,14 @@ public class GeoPointFieldTypeTests extends FieldTypeTestCase {
addModifier(new Modifier("geohash", false) {
@Override
public void modify(MappedFieldType ft) {
- ((BaseGeoPointFieldMapper.GeoPointFieldType)ft).setGeoHashEnabled(new StringFieldMapper.StringFieldType(), 1, true);
+ ((LegacyGeoPointFieldType)ft).setGeoHashEnabled(new StringFieldMapper.StringFieldType(), 1, true);
}
});
addModifier(new Modifier("lat_lon", false) {
@Override
public void modify(MappedFieldType ft) {
- ((BaseGeoPointFieldMapper.GeoPointFieldType)ft).setLatLonEnabled(new LegacyDoubleFieldMapper.DoubleFieldType(), new LegacyDoubleFieldMapper.DoubleFieldType());
+ ((LegacyGeoPointFieldType)ft).setLatLonEnabled(new LegacyDoubleFieldMapper.DoubleFieldType(),
+ new LegacyDoubleFieldMapper.DoubleFieldType());
}
});
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java
index 65856b7afa..4a22d56e8a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java
@@ -22,24 +22,38 @@ import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
+import java.util.Collection;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase {
+
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void testDefaultConfiguration() throws IOException {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("location")
@@ -423,4 +437,30 @@ public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase {
assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(1d)));
assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(ShapeBuilder.Orientation.CW));
}
+
+ public void testEmptyName() throws Exception {
+ // after 5.x
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type1", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // before 5.x
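+ // (2.x also rejected the empty name, but with a different message)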
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ DocumentMapperParser parser2x = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser();
+
+ e = expectThrows(IllegalArgumentException.class,
+ () -> parser2x.parse("type1", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("fieldName is required"));
+ }
+
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeohashMappingGeoPointTests.java
deleted file mode 100644
index 05581e7902..0000000000
--- a/core/src/test/java/org/elasticsearch/index/mapper/GeohashMappingGeoPointTests.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.mapper;
-
-import org.apache.lucene.spatial.geopoint.document.GeoPointField;
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESSingleNodeTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.VersionUtils;
-
-import java.util.Collection;
-
-import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;
-
-/**
- *
- */
-public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase {
-
- @Override
- protected Collection<Class<? extends Plugin>> getPlugins() {
- return pluginList(InternalSettingsPlugin.class);
- }
-
- public void testLatLonValues() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false)
- .endObject().endObject().endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
- Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
-
- ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
- .startObject()
- .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
- .endObject()
- .bytes());
-
- assertThat(doc.rootDoc().getField("point.lat"), nullValue());
- assertThat(doc.rootDoc().getField("point.lon"), nullValue());
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
- }
- }
-
- public void testLatLonInOneValue() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject()
- .endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
- Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
-
- ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
- .startObject()
- .field("point", "1.2,1.3")
- .endObject()
- .bytes());
-
- assertThat(doc.rootDoc().getField("point.lat"), nullValue());
- assertThat(doc.rootDoc().getField("point.lon"), nullValue());
- if (version.before(Version.V_2_2_0)) {
- assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
- } else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
- }
- }
-
- public void testGeoHashValue() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true)
- .endObject().endObject().endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
- Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
-
- ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
- .startObject()
- .field("point", stringEncode(1.3, 1.2))
- .endObject()
- .bytes());
-
- assertThat(doc.rootDoc().getField("point.lat"), nullValue());
- assertThat(doc.rootDoc().getField("point.lon"), nullValue());
- assertThat(doc.rootDoc().getBinaryValue("point.geohash"), equalTo(new BytesRef(stringEncode(1.3, 1.2))));
- assertThat(doc.rootDoc().get("point"), notNullValue());
- }
-
- public void testGeoHashPrecisionAsInteger() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true)
- .field("geohash_precision", 10).endObject().endObject().endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
- Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
- FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point");
- assertThat(mapper, instanceOf(BaseGeoPointFieldMapper.class));
- BaseGeoPointFieldMapper geoPointFieldMapper = (BaseGeoPointFieldMapper) mapper;
- assertThat(geoPointFieldMapper.fieldType().geoHashPrecision(), is(10));
- }
-
- public void testGeoHashPrecisionAsLength() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).field("geohash_precision", "5m").endObject().endObject()
- .endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
- Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
- FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point");
- assertThat(mapper, instanceOf(BaseGeoPointFieldMapper.class));
- BaseGeoPointFieldMapper geoPointFieldMapper = (BaseGeoPointFieldMapper) mapper;
- assertThat(geoPointFieldMapper.fieldType().geoHashPrecision(), is(10));
- }
-
- public void testNullValue() throws Exception {
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("point").field("type", "geo_point").endObject().endObject()
- .endObject().endObject().string();
-
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
- Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
-
- ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
- .startObject()
- .field("point", (Object) null)
- .endObject()
- .bytes());
-
- assertThat(doc.rootDoc().get("point"), nullValue());
- }
-}
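
The file removed above is superseded by the version-pinned LegacyGeohashMappingGeoPointTests added later in this commit. The recurring pattern in both the removed and the added tests is to create a test index whose metadata claims an old creation version, so that the mapper parser applies pre-5.0 semantics. A minimal sketch of that pattern, assuming the ESSingleNodeTestCase framework and a hypothetical index name "test" (all calls appear verbatim in the tests above):

    // Pin the index to a randomized 2.x creation version so that legacy
    // geo_point mapping semantics apply when the mapping is parsed.
    Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_4_0);
    Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, version)
            .build();
    DocumentMapper mapper = createIndex("test", settings)
            .mapperService()
            .documentMapperParser()
            .parse("type", new CompressedXContent(mapping));
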
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java
index 68ef2c2dd5..3d3a69ea80 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java
@@ -23,8 +23,11 @@ import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.network.InetAddresses;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -34,13 +37,18 @@ import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.IpFieldMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
import java.io.IOException;
import java.net.InetAddress;
+import java.util.Collection;
public class IpFieldMapperTests extends ESSingleNodeTestCase {
@@ -53,6 +61,11 @@ public class IpFieldMapperTests extends ESSingleNodeTestCase {
parser = indexService.mapperService().documentMapperParser();
}
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void testDefaults() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "ip").endObject().endObject()
@@ -290,4 +303,24 @@ public class IpFieldMapperTests extends ESSingleNodeTestCase {
assertTrue(got, got.contains("\"ignore_malformed\":false"));
assertTrue(got, got.contains("\"include_in_all\":false"));
}
+
+ public void testEmptyName() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", "ip").endObject().endObject()
+ .endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // before 5.x, empty field names were accepted and the mapping round-trips unchanged
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ parser = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
}
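
The new testEmptyName follows the expectThrows idiom used across these mapper tests: the failing parse is wrapped in a lambda and assertions run on the captured exception. A minimal sketch of the idiom, assuming the parser field initialized against a 5.x-created index as in the test's @Before setup:

    // Parsing a mapping whose field name is "" must fail on a 5.x index.
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> parser.parse("type", new CompressedXContent(mapping)));
    assertThat(e.getMessage(), containsString("name cannot be empty string"));
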
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
index 9c4eed15bc..396cbe49ee 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
@@ -30,18 +30,18 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
@@ -77,7 +77,7 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
IndexableField[] fields = doc.rootDoc().getFields("field");
assertEquals(2, fields.length);
-
+
assertEquals(new BytesRef("1234"), fields[0].binaryValue());
IndexableFieldType fieldType = fields[0].fieldType();
assertThat(fieldType.omitNorms(), equalTo(true));
@@ -273,14 +273,28 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build());
parser = indexService.mapperService().documentMapperParser();
- String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("field").field("type", "keyword").field("boost", 2f).endObject().endObject()
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "keyword")
+ .field("boost", 2f)
+ .endObject()
+ .endObject()
.endObject().endObject().string();
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
- String expectedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("field").field("type", "keyword")
- .field("boost", 2f).field("norms", true).endObject().endObject()
+ String expectedMapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("boost", 2f)
+ .field("index", "not_analyzed")
+ .field("norms", true)
+ .field("fielddata", false)
+ .endObject()
+ .endObject()
.endObject().endObject().string();
assertEquals(expectedMapping, mapper.mappingSource().toString());
}
@@ -304,4 +318,40 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
assertEquals(2, fields.length);
assertFalse(fields[0].fieldType().omitNorms());
}
+
+ public void testEmptyName() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("")
+ .field("type", "keyword")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ // Empty names are not allowed in indices created on or after 5.0
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // empty names were allowed in indices created before 5.0
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ parser = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ String downgradedMapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .field("fielddata", false)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ assertEquals(downgradedMapping, defaultMapper.mappingSource().string());
+ }
}
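
On an index created before 5.0, a keyword mapping is automatically downgraded to the legacy string type, and the serialized mapping reflects the downgraded form rather than the submitted source. A sketch of the shape the tests above expect back, using a hypothetical field named "field":

    // A downgraded keyword comes back as a not_analyzed string with
    // fielddata disabled; the assertion compares serialized sources.
    String downgraded = XContentFactory.jsonBuilder().startObject()
            .startObject("type").startObject("properties").startObject("field")
                .field("type", "string")
                .field("index", "not_analyzed")
                .field("fielddata", false)
            .endObject().endObject().endObject().endObject().string();
    assertEquals(downgraded, defaultMapper.mappingSource().string());
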
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java
index 284581a844..79703986c8 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java
@@ -40,15 +40,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.LegacyDateFieldMapper;
-import org.elasticsearch.index.mapper.LegacyLongFieldMapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -114,11 +106,11 @@ public class LegacyDateFieldMapperTests extends ESSingleNodeTestCase {
assertThat(fieldMapper, instanceOf(LegacyDateFieldMapper.class));
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date1");
- assertThat(fieldMapper, instanceOf(TextFieldMapper.class));
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date2");
- assertThat(fieldMapper, instanceOf(TextFieldMapper.class));
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date3");
- assertThat(fieldMapper, instanceOf(TextFieldMapper.class));
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
}
public void testParseLocal() {
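
The hunk above flips the expected mapper class for the wrong_date fields: since this test runs against 2.x-created indices, a value that fails date parsing is dynamically mapped with the legacy string type rather than text. A condensed sketch of the assertion style, assuming defaultMapper was parsed against such a version-pinned index:

    // Malformed dates fall back to dynamic string mapping on legacy indices.
    FieldMapper fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date1");
    assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
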
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java
new file mode 100644
index 0000000000..426114cb38
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper.LegacyGeoPointFieldType;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
+
+import java.util.Collection;
+
+import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class LegacyGeohashMappingGeoPointTests extends ESSingleNodeTestCase {
+
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true)
+ .endObject().endObject().endObject().endObject().string();
+
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_4_0);
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
+ DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser()
+ .parse("type", new CompressedXContent(mapping));
+
+ ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", stringEncode(1.3, 1.2))
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ assertThat(doc.rootDoc().getField("point.geohash").stringValue(), equalTo(stringEncode(1.3, 1.2)));
+ assertThat(doc.rootDoc().get("point"), notNullValue());
+ }
+
+ public void testGeoHashPrecisionAsInteger() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true)
+ .field("geohash_precision", 10).endObject().endObject().endObject().endObject().string();
+
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_4_0);
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
+ DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser()
+ .parse("type", new CompressedXContent(mapping));
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point");
+ assertThat(mapper, instanceOf(BaseGeoPointFieldMapper.class));
+ BaseGeoPointFieldMapper geoPointFieldMapper = (BaseGeoPointFieldMapper) mapper;
+ assertThat(((LegacyGeoPointFieldType)geoPointFieldMapper.fieldType()).geoHashPrecision(), is(10));
+ }
+
+ public void testGeoHashPrecisionAsLength() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true)
+ .field("geohash_precision", "5m").endObject().endObject()
+ .endObject().endObject().string();
+
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_4_0);
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
+ DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser()
+ .parse("type", new CompressedXContent(mapping));
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point");
+ assertThat(mapper, instanceOf(BaseGeoPointFieldMapper.class));
+ BaseGeoPointFieldMapper geoPointFieldMapper = (BaseGeoPointFieldMapper) mapper;
+ assertThat(((LegacyGeoPointFieldType)geoPointFieldMapper.fieldType()).geoHashPrecision(), is(10));
+ }
+}
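
One detail worth flagging in the new legacy geohash tests: GeoHashUtils.stringEncode takes longitude first. The documents indexed above use lat 1.2 and lon 1.3, so the encoded term is produced as in this sketch (restating the call from the test):

    // stringEncode(lon, lat): 1.3 is the longitude, 1.2 the latitude.
    String geohash = stringEncode(1.3, 1.2);
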
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java
index e0cfce61af..3c07ec4b90 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java
@@ -24,7 +24,10 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.IndexableFieldType;
import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -33,18 +36,10 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.ContentPath;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.StringFieldMapper.Builder;
import org.elasticsearch.index.mapper.StringFieldMapper.StringFieldType;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.StringFieldMapper;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
@@ -54,9 +49,12 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonList;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
@@ -753,4 +751,448 @@ public class LegacyStringMappingTests extends ESSingleNodeTestCase {
assertEquals("Cannot set position_increment_gap on field [field] without positions enabled", e.getMessage());
}
}
+
+ public void testKeywordFieldAsStringWithUnsupportedField() throws IOException {
+ String mapping = mappingForTestField(b -> b.field("type", "keyword").field("fielddata", true)).string();
+ Exception e = expectThrows(IllegalArgumentException.class, () -> parser.parse("test_type", new CompressedXContent(mapping)));
+ assertEquals("Automatic downgrade from [keyword] to [string] failed because parameters [fielddata] are not supported for "
+ + "automatic downgrades.", e.getMessage());
+ }
+
+ public void testMergeKeywordIntoString() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword"));
+ }
+
+ public void testMergeKeywordIntoStringWithIndexFalse() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "no");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "no"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("index", false));
+ }
+
+ public void testMergeKeywordIntoStringWithStore() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("store", true);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("store", true));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("store", true));
+ }
+
+ public void testMergeKeywordIntoStringWithDocValues() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("doc_values", false);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("doc_values", false));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("doc_values", false));
+ }
+
+ public void testMergeKeywordIntoStringWithNorms() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("norms", true);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("norms", true));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("norms", true));
+ // norms can also be given as an object, but it just gets squashed into true/false
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed")
+ .startObject("norms")
+ .field("enabled", true)
+ .field("loading", randomAsciiOfLength(5)) // Totally ignored even though it used to be eager/lazy
+ .endObject());
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword")
+ .startObject("norms")
+ .field("enabled", true)
+ .field("loading", randomAsciiOfLength(5))
+ .endObject());
+ }
+
+ public void testMergeKeywordIntoStringWithBoost() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("boost", 1.5);
+ expectedMapping.put("norms", true); // Implied by having a boost
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("boost", 1.5));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("boost", 1.5));
+ expectedMapping.put("boost", 1.4);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("boost", 1.4));
+ }
+
+ public void testMergeKeywordIntoStringWithFields() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ Map<String, Object> expectedFields = new HashMap<>();
+ expectedMapping.put("fields", expectedFields);
+ Map<String, Object> expectedFoo = new HashMap<>();
+ expectedFields.put("foo", expectedFoo);
+ expectedFoo.put("type", "string");
+ expectedFoo.put("analyzer", "standard");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject());
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject());
+
+ Map<String, Object> expectedBar = new HashMap<>();
+ expectedFields.put("bar", expectedBar);
+ expectedBar.put("type", "string");
+ expectedBar.put("analyzer", "whitespace");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("bar")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject());
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("bar")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject());
+ }
+
+ public void testMergeKeywordIntoStringWithCopyTo() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("copy_to", singletonList("another_field"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("copy_to", "another_field"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("copy_to", "another_field"));
+ }
+
+ public void testMergeKeywordIntoStringWithIncludeInAll() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("include_in_all", false);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("include_in_all", false));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("include_in_all", false));
+ }
+
+ public void testMergeKeywordIntoStringWithIgnoreAbove() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("ignore_above", 128);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("ignore_above", 128));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("ignore_above", 128));
+ }
+
+ public void testMergeKeywordIntoStringWithIndexOptions() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("index_options", "freqs");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("index_options", "freqs"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("index_options", "freqs"));
+ }
+
+ public void testMergeKeywordIntoStringWithSimilarity() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("index", "not_analyzed");
+ expectedMapping.put("fielddata", false);
+ expectedMapping.put("similarity", "BM25");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index", "not_analyzed").field("similarity", "BM25"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "keyword").field("similarity", "BM25"));
+ }
+
+ public void testTextFieldAsStringWithUnsupportedField() throws IOException {
+ String mapping = mappingForTestField(b -> b.field("type", "text").field("null_value", "kitten")).string();
+ Exception e = expectThrows(IllegalArgumentException.class, () -> parser.parse("test_type", new CompressedXContent(mapping)));
+ assertEquals("Automatic downgrade from [text] to [string] failed because parameters [null_value] are not supported for "
+ + "automatic downgrades.", e.getMessage());
+ }
+
+ public void testMergeTextIntoString() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithStore() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("store", true);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("store", true));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("store", true).field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithDocValues() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("doc_values", false));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("doc_values", false).field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithNorms() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("norms", false);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("norms", false));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("norms", false).field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithBoost() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("boost", 1.5);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("boost", 1.5));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("boost", 1.5).field("fielddata", true));
+ expectedMapping.put("boost", 1.4);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("boost", 1.4).field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithFields() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ Map<String, Object> expectedFields = new HashMap<>();
+ expectedMapping.put("fields", expectedFields);
+ Map<String, Object> expectedFoo = new HashMap<>();
+ expectedFields.put("foo", expectedFoo);
+ expectedFoo.put("type", "string");
+ expectedFoo.put("analyzer", "standard");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject());
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("fielddata", true)
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject());
+
+ Map<String, Object> expectedBar = new HashMap<>();
+ expectedFields.put("bar", expectedBar);
+ expectedBar.put("type", "string");
+ expectedBar.put("analyzer", "whitespace");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("bar")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject());
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("fielddata", true)
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("bar")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject());
+ }
+
+ public void testMergeTextIntoStringWithCopyTo() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("copy_to", singletonList("another_field"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("copy_to", "another_field"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("copy_to", "another_field").field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithFielddataDisabled() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("fielddata", false);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("fielddata", false));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text"));
+ }
+
+ public void testMergeTextIntoStringWithEagerGlobalOrdinals() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("eager_global_ordinals", true);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").startObject("fielddata")
+ .field("format", "pagedbytes")
+ .field("loading", "eager_global_ordinals")
+ .endObject());
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("fielddata", true).field("eager_global_ordinals", true));
+ }
+
+ public void testMergeTextIntoStringWithFielddataFrequencyFilter() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ Map<String, Object> fielddataFrequencyFilter = new HashMap<>();
+ expectedMapping.put("fielddata_frequency_filter", fielddataFrequencyFilter);
+ fielddataFrequencyFilter.put("min", 0.001);
+ fielddataFrequencyFilter.put("max", 0.1);
+ fielddataFrequencyFilter.put("min_segment_size", 100);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").startObject("fielddata")
+ .field("format", "pagedbytes")
+ .startObject("filter")
+ .startObject("frequency")
+ .field("min", 0.001)
+ .field("max", 0.1)
+ .field("min_segment_size", 100)
+ .endObject()
+ .endObject()
+ .endObject());
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("fielddata", true)
+ .startObject("fielddata_frequency_filter")
+ .field("min", 0.001)
+ .field("max", 0.1)
+ .field("min_segment_size", 100)
+ .endObject());
+ }
+
+ public void testMergeTextIntoStringWithIncludeInAll() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("include_in_all", false);
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("include_in_all", false));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("include_in_all", false).field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithSearchQuoteAnalyzer() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("analyzer", "standard");
+ expectedMapping.put("search_analyzer", "whitespace");
+ expectedMapping.put("search_quote_analyzer", "keyword");
+ mergeMappingStep(expectedMapping, b -> b
+ .field("type", "string")
+ .field("analyzer", "standard")
+ .field("search_analyzer", "whitespace")
+ .field("search_quote_analyzer", "keyword"));
+ mergeMappingStep(expectedMapping, b -> b
+ .field("type", "text")
+ .field("analyzer", "standard")
+ .field("search_analyzer", "whitespace")
+ .field("search_quote_analyzer", "keyword")
+ .field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithIndexOptions() throws IOException {
+ String indexOptions = randomIndexOptions();
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ if (false == "positions".equals(indexOptions)) {
+ expectedMapping.put("index_options", indexOptions);
+ }
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("index_options", indexOptions));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("index_options", indexOptions).field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithPositionIncrementGap() throws IOException {
+ int positionIncrementGap = between(0, 10000);
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("position_increment_gap", positionIncrementGap);
+ mergeMappingStep(expectedMapping, b -> b
+ .field("type", "string")
+ .field("position_increment_gap", positionIncrementGap));
+ mergeMappingStep(expectedMapping, b -> b
+ .field("type", "text")
+ .field("position_increment_gap", positionIncrementGap)
+ .field("fielddata", true));
+ }
+
+ public void testMergeTextIntoStringWithSimilarity() throws IOException {
+ Map<String, Object> expectedMapping = new HashMap<>();
+ expectedMapping.put("type", "string");
+ expectedMapping.put("similarity", "BM25");
+ mergeMappingStep(expectedMapping, b -> b.field("type", "string").field("similarity", "BM25"));
+ mergeMappingStep(expectedMapping, b -> b.field("type", "text").field("similarity", "BM25").field("fielddata", true));
+ }
+
+ private interface FieldBuilder {
+ void populateMappingForField(XContentBuilder b) throws IOException;
+ }
+ private void mergeMappingStep(Map<String, Object> expectedMapping, FieldBuilder fieldBuilder) throws IOException {
+ XContentBuilder b = mappingForTestField(fieldBuilder);
+ if (logger.isInfoEnabled()) {
+ logger.info("--> Updating mapping to {}", b.string());
+ }
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("test_type").setSource(b));
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings("test").get();
+ ImmutableOpenMap<String, MappingMetaData> index = response.getMappings().get("test");
+ assertNotNull("mapping for index not found", index);
+ MappingMetaData type = index.get("test_type");
+ assertNotNull("mapping for type not found", type);
+ Map<?, ?> properties = (Map<?, ?>) type.sourceAsMap().get("properties");
+ assertEquals(expectedMapping, properties.get("test_field"));
+ }
+
+ private XContentBuilder mappingForTestField(FieldBuilder fieldBuilder) throws IOException {
+ XContentBuilder b = JsonXContent.contentBuilder();
+ b.startObject(); {
+ b.startObject("test_type"); {
+ b.startObject("properties"); {
+ b.startObject("test_field"); {
+ fieldBuilder.populateMappingForField(b);
+ }
+ b.endObject();
+ }
+ b.endObject();
+ }
+ b.endObject();
+ }
+ return b.endObject();
+ }
+
+ private String randomIndexOptions() {
+ IndexOptions options = randomValueOtherThan(IndexOptions.NONE, () -> randomFrom(IndexOptions.values()));
+ switch (options) {
+ case DOCS:
+ return "docs";
+ case DOCS_AND_FREQS:
+ return "freqs";
+ case DOCS_AND_FREQS_AND_POSITIONS:
+ return "positions";
+ case DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS:
+ return "offsets";
+ default:
+ throw new IllegalArgumentException("Unknown options [" + options + "]");
+ }
+ }
}
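
All of the merge tests added above route through the mergeMappingStep helper, which round-trips an update through the cluster: put the mapping, fetch it back, decode it with sourceAsMap, and compare the properties entry for test_field. A condensed sketch of one keyword-into-string scenario, composed entirely of calls defined in the class above:

    Map<String, Object> expected = new HashMap<>();
    expected.put("type", "string");
    expected.put("index", "not_analyzed");
    expected.put("fielddata", false);
    // Establish the legacy string field, then merge a keyword mapping into
    // it; both steps must serialize back to the same legacy form.
    mergeMappingStep(expected, b -> b.field("type", "string").field("index", "not_analyzed"));
    mergeMappingStep(expected, b -> b.field("type", "keyword"));
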
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
index e11f0b90e8..8711ead6ed 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
@@ -128,7 +128,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
.setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS)))
.get();
assertThat(countResponse.getHits().totalHits(), equalTo(1L));
- countResponse = client().prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.toString())).get();
+ countResponse = client().prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())).get();
assertThat(countResponse.getHits().totalHits(), equalTo(1L));
}
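
The one-line change above swaps GeoPoint.toString() for GeoPoint.geohash() when querying the a.b multi-field, since that sub-field indexes the geohash representation of the point. A sketch of the corrected query (client() and the index name as in the test; SearchResponse is an assumed import):

    // Query the geohash multi-field with the geohash string, not toString().
    SearchResponse countResponse = client().prepareSearch("my-index")
            .setSize(0)
            .setQuery(matchQuery("a.b", point.geohash()))
            .get();
    assertThat(countResponse.getHits().totalHits(), equalTo(1L));
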
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
index 8375e54d39..5de43f5958 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
@@ -21,21 +21,32 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
public class NumberFieldMapperTests extends ESSingleNodeTestCase {
@@ -57,6 +68,11 @@ public class NumberFieldMapperTests extends ESSingleNodeTestCase {
}
}
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void doTestDefaults(String type) throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", type).endObject().endObject()
@@ -112,7 +128,7 @@ public class NumberFieldMapperTests extends ESSingleNodeTestCase {
public void testNoDocValues() throws Exception {
for (String type : TYPES) {
- doTestNotIndexed(type);
+ doTestNoDocValues(type);
}
}
@@ -377,4 +393,32 @@ public class NumberFieldMapperTests extends ESSingleNodeTestCase {
assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
assertFalse(dvField.fieldType().stored());
}
+
+ public void testEmptyName() throws IOException {
+ // on 5.x and later, empty field names are rejected for every numeric type
+ for (String type : TYPES) {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", type).endObject().endObject()
+ .endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+ }
+
+ // before 5.x, empty field names were accepted and the mapping round-trips unchanged
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ parser = indexService.mapperService().documentMapperParser();
+ for (String type : TYPES) {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("").field("type", type).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
+ }
}
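
Besides the new testEmptyName, note the small fix above: testNoDocValues now delegates to doTestNoDocValues instead of doTestNotIndexed, restoring coverage of the doc-values path. The visible context at the end of the file shows the assertion style that helper family uses; a sketch, with the fields-array position being a hypothetical slot:

    // Numeric fields carry SORTED_NUMERIC doc values and are not stored.
    IndexableField dvField = doc.rootDoc().getFields("field")[1]; // hypothetical slot
    assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
    assertFalse(dvField.fieldType().stored());
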
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
index b5a31e49a6..68959ccc68 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
@@ -19,13 +19,26 @@
package org.elasticsearch.index.mapper;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
+import org.elasticsearch.index.mapper.ObjectMapper.Dynamic;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
+import java.io.IOException;
+import java.util.Collection;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
public class ObjectMapperTests extends ESSingleNodeTestCase {
@@ -155,4 +168,66 @@ public class ObjectMapperTests extends ESSingleNodeTestCase {
.string();
createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping));
}
+
+ public void testMerge() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "keyword")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ MapperService mapperService = createIndex("test").mapperService();
+ DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+ assertNull(mapper.root().includeInAll());
+ assertNull(mapper.root().dynamic());
+ String update = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .field("include_in_all", false)
+ .field("dynamic", "strict")
+ .endObject().endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(update), MergeReason.MAPPING_UPDATE, false);
+ assertFalse(mapper.root().includeInAll());
+ assertEquals(Dynamic.STRICT, mapper.root().dynamic());
+ }
+
+ public void testEmptyName() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "text")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ // Empty names are not allowed in indices created on or after 5.0
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
+ createIndex("test").mapperService().documentMapperParser().parse("", new CompressedXContent(mapping));
+ });
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // empty names were allowed in indices created before 5.0
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ DocumentMapperParser parser = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser.parse("", new CompressedXContent(mapping));
+ String downgradedMapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "string")
+ .field("fielddata", false)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ assertEquals(downgradedMapping, defaultMapper.mappingSource().string());
+ }
+
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
}
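
The new testMerge demonstrates that root-object settings merge incrementally: an update containing only include_in_all and dynamic is folded into the existing type without touching properties. A condensed sketch of the merge call and resulting assertions (all identifiers from the test above):

    DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(update),
            MergeReason.MAPPING_UPDATE, false);
    assertFalse(mapper.root().includeInAll());
    assertEquals(Dynamic.STRICT, mapper.root().dynamic());
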
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
index 15738ada3b..078c60ce19 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
@@ -18,30 +18,22 @@
*/
package org.elasticsearch.index.mapper;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
-import org.elasticsearch.index.mapper.ContentPath;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParentFieldMapper;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -111,10 +103,11 @@ public class ParentFieldMapperTests extends ESSingleNodeTestCase {
public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {
Index index = new Index("_index", "testUUID");
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);
- AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(),
- Collections.emptyMap(), Collections.emptyMap());
+ NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", new StandardAnalyzer());
+ IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer,
+ Collections.emptyMap());
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
- MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService,
+ MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService,
new IndicesModule(emptyList()).getMapperRegistry(), () -> null);
XContentBuilder mappingSource = jsonBuilder().startObject().startObject("some_type")
.startObject("properties")
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
new file mode 100644
index 0000000000..a76d5d0131
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+
+import java.util.Arrays;
+
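+/**
+ * Tests that root object mapper settings (numeric and date detection, dynamic date
+ * formats, dynamic templates) are kept on mapping updates that leave them implicit
+ * and replaced when they are set explicitly.
+ */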
+public class RootObjectMapperTests extends ESSingleNodeTestCase {
+
+ public void testNumericDetection() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .field("numeric_detection", false)
+ .endObject()
+ .endObject().string();
+ MapperService mapperService = createIndex("test").mapperService();
+ DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping, mapper.mappingSource().toString());
+
+ // update with a different explicit value
+ String mapping2 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .field("numeric_detection", true)
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping2, mapper.mappingSource().toString());
+
+ // update with an implicit value: no change
+ String mapping3 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping2, mapper.mappingSource().toString());
+ }
+
+ public void testDateDetection() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .field("date_detection", true)
+ .endObject()
+ .endObject().string();
+ MapperService mapperService = createIndex("test").mapperService();
+ DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping, mapper.mappingSource().toString());
+
+ // update with a different explicit value
+ String mapping2 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .field("date_detection", false)
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping2, mapper.mappingSource().toString());
+
+ // update with an implicit value: no change
+ String mapping3 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping2, mapper.mappingSource().toString());
+ }
+
+ public void testDateFormatters() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .field("dynamic_date_formats", Arrays.asList("YYYY-MM-dd"))
+ .endObject()
+ .endObject().string();
+ MapperService mapperService = createIndex("test").mapperService();
+ DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping, mapper.mappingSource().toString());
+
+ // no update if formatters are not set explicitly
+ String mapping2 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping, mapper.mappingSource().toString());
+
+ String mapping3 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .field("dynamic_date_formats", Arrays.asList())
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping3, mapper.mappingSource().toString());
+ }
+
+ public void testDynamicTemplates() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startArray("dynamic_templates")
+ .startObject()
+ .startObject("my_template")
+ .field("match_mapping_type", "string")
+ .startObject("mapping")
+ .field("type", "keyword")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject().string();
+ MapperService mapperService = createIndex("test").mapperService();
+ DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping, mapper.mappingSource().toString());
+
+ // no update if templates are not set explicitly
+ String mapping2 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping, mapper.mappingSource().toString());
+
+ String mapping3 = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .field("dynamic_templates", Arrays.asList())
+ .endObject()
+ .endObject().string();
+ mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+ assertEquals(mapping3, mapper.mappingSource().toString());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java
index e72bb631ad..3556cea23a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java
@@ -21,18 +21,29 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.io.IOException;
+import java.util.Collection;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
public class ScaledFloatFieldMapperTests extends ESSingleNodeTestCase {
@@ -46,6 +57,11 @@ public class ScaledFloatFieldMapperTests extends ESSingleNodeTestCase {
parser = indexService.mapperService().documentMapperParser();
}
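+ // InternalSettingsPlugin allows test indices to be created with an explicit index.version.created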
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void testDefaults() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "scaled_float")
@@ -336,4 +352,27 @@ public class ScaledFloatFieldMapperTests extends ESSingleNodeTestCase {
assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
assertFalse(dvField.fieldType().stored());
}
+
+ public void testEmptyName() throws IOException {
+ // Empty name not allowed in index created after 5.0
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("")
+ .field("type", "scaled_float")
+ .field("scaling_factor", 10.0).endObject().endObject()
+ .endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // empty name allowed in index created before 5.0
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ parser = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().toString());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java
index b3cbaa291b..59571b7023 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java
@@ -117,8 +117,8 @@ public class ScaledFloatFieldTypeTests extends FieldTypeTestCase {
IndexSearcher searcher = newSearcher(reader);
final int numQueries = 1000;
for (int i = 0; i < numQueries; ++i) {
- double l = (randomDouble() * 2 - 1) * 10000;
- double u = (randomDouble() * 2 - 1) * 10000;
+ Double l = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000;
+ Double u = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000;
boolean includeLower = randomBoolean();
boolean includeUpper = randomBoolean();
Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper);
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
index 169a7e1d90..c6f9615623 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
@@ -28,11 +28,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
@@ -87,7 +82,7 @@ public class SourceFieldMapperTests extends ESSingleNodeTestCase {
public void testIncludes() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("_source").field("includes", new String[]{"path1*"}).endObject()
+ .startObject("_source").array("includes", new String[]{"path1*"}).endObject()
.endObject().endObject().string();
DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
@@ -108,7 +103,7 @@ public class SourceFieldMapperTests extends ESSingleNodeTestCase {
public void testExcludes() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("_source").field("excludes", new String[]{"path1*"}).endObject()
+ .startObject("_source").array("excludes", new String[]{"path1*"}).endObject()
.endObject().endObject().string();
DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TTLFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TTLFieldMapperTests.java
index c5d00e57c6..bf51be3c2d 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/TTLFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/TTLFieldMapperTests.java
@@ -28,6 +28,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
@@ -210,20 +211,59 @@ public class TTLFieldMapperTests extends ESSingleNodeTestCase {
XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
IndexService indexService = createIndex("testindex", BW_SETTINGS, "type", mappingWithTtlEnabled);
XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
- indexService.mapperService().merge("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+ indexService.mapperService().merge("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()),
+ MapperService.MergeReason.MAPPING_UPDATE, false);
CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
- assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"text\"}}}}")));
+ assertEquals(JsonXContent.contentBuilder().startObject()
+ .startObject("type")
+ .startObject("_ttl")
+ .field("enabled", true)
+ .field("default", 360000)
+ .endObject()
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("fielddata", false)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string(),
+ mappingAfterMerge.string());
}
public void testMergeWithOnlyDefaultSetTtlDisabled() throws Exception {
XContentBuilder mappingWithTtlEnabled = getMappingWithTtlDisabled("7d");
IndexService indexService = createIndex("testindex", BW_SETTINGS, "type", mappingWithTtlEnabled);
CompressedXContent mappingAfterCreation = indexService.mapperService().documentMapper("type").mappingSource();
- assertThat(mappingAfterCreation, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"text\"}}}}")));
+ assertEquals(JsonXContent.contentBuilder().startObject()
+ .startObject("type")
+ .startObject("_ttl")
+ .field("enabled", false)
+ .endObject()
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("fielddata", false)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string(),
+ mappingAfterCreation.string());
XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
- indexService.mapperService().merge("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+ indexService.mapperService().merge("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()),
+ MapperService.MergeReason.MAPPING_UPDATE, false);
CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
- assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"text\"}}}}")));
+ assertEquals(JsonXContent.contentBuilder().startObject()
+ .startObject("type")
+ .startObject("_ttl")
+ .field("enabled", false)
+ .endObject()
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("fielddata", false)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string(),
+ mappingAfterMerge.string());
}
public void testIncludeInObjectNotAllowed() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
index f57c2810a6..846d2c5666 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
@@ -28,29 +28,32 @@ import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.io.IOException;
-import java.util.Collections;
import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
@@ -65,6 +68,11 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
parser = indexService.mapperService().documentMapperParser();
}
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
public void testDefaults() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "text").endObject().endObject()
@@ -200,7 +208,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
- .field("field", new String[] {"a", "b"})
+ .array("field", new String[] {"a", "b"})
.endObject()
.bytes());
@@ -239,7 +247,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
- .field("field", new String[] {"a", "b"})
+ .array("field", new String[] {"a", "b"})
.endObject()
.bytes());
@@ -549,4 +557,39 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
assertEquals("Cannot set position_increment_gap on field [field] without positions enabled", e.getMessage());
}
}
+
+ public void testEmptyName() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("")
+ .field("type", "text")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ // Empty name not allowed in index created after 5.0
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // empty name allowed in index created before 5.0
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ parser = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ String downgradedMapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("")
+ .field("type", "string")
+ .field("fielddata", false)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ assertEquals(downgradedMapping, defaultMapper.mappingSource().string());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
index 67d96f988c..835295def4 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
@@ -24,17 +24,24 @@ import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.TokenCountFieldMapper;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
/**
@@ -94,4 +101,38 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
};
assertThat(TokenCountFieldMapper.countPositions(analyzer, "", ""), equalTo(7));
}
+
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
+ public void testEmptyName() throws IOException {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ // Empty name not allowed in index created after 5.0
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // empty name allowed in index created before 5.0
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ indexService = createIndex("test_old", oldIndexSettings);
+ DocumentMapperParser parser2x = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser2x.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java
index 5e91126117..5e63c3868a 100644
--- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java
@@ -365,12 +365,22 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
* test that two queries in an object throw an error
*/
public void testTooManyQueriesInObject() throws IOException {
- String clauseType = randomFrom(new String[] {"must", "should", "must_not", "filter"});
+ String clauseType = randomFrom("must", "should", "must_not", "filter");
// should also throw an error if an invalid query is preceded by a valid one
- String query = "{\"bool\" : {\"" + clauseType
- + "\" : { \"match\" : { \"foo\" : \"bar\" } , \"match\" : { \"baz\" : \"buzz\" } } } }";
+ String query = "{\n" +
+ " \"bool\": {\n" +
+ " \"" + clauseType + "\": {\n" +
+ " \"match\": {\n" +
+ " \"foo\": \"bar\"\n" +
+ " },\n" +
+ " \"match\": {\n" +
+ " \"baz\": \"buzz\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}";
ParsingException ex = expectThrows(ParsingException.class, () -> parseQuery(query, ParseFieldMatcher.EMPTY));
- assertEquals("expected [END_OBJECT] but got [FIELD_NAME], possibly too many query clauses", ex.getMessage());
+ assertEquals("[match] malformed query, expected [END_OBJECT] but found [FIELD_NAME]", ex.getMessage());
}
public void testRewrite() throws IOException {
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java
index c9f55fe999..707e439fc8 100644
--- a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java
@@ -31,7 +31,7 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.search.geo.InMemoryGeoBoundingBoxQuery;
+import org.elasticsearch.index.search.geo.LegacyInMemoryGeoBoundingBoxQuery;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.geo.RandomShapeGenerator;
import org.locationtech.spatial4j.io.GeohashUtils;
@@ -242,7 +242,8 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase<GeoBo
}
}
} else {
- assertTrue("memory queries should result in InMemoryGeoBoundingBoxQuery", query instanceof InMemoryGeoBoundingBoxQuery);
+ assertTrue("memory queries should result in LegacyInMemoryGeoBoundingBoxQuery",
+ query instanceof LegacyInMemoryGeoBoundingBoxQuery);
}
}
}
@@ -253,7 +254,8 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase<GeoBo
}
@Override
- protected void doAssertLuceneQuery(GeoBoundingBoxQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
+ protected void doAssertLuceneQuery(GeoBoundingBoxQueryBuilder queryBuilder, Query query, QueryShardContext context)
+ throws IOException {
MappedFieldType fieldType = context.fieldMapper(queryBuilder.fieldName());
if (fieldType == null) {
assertTrue("Found no indexed geo query.", query instanceof MatchNoDocsQuery);
@@ -262,10 +264,12 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase<GeoBo
if (queryBuilder.type() == GeoExecType.INDEXED) {
assertTrue("Found no indexed geo query.", query instanceof ConstantScoreQuery);
} else {
- assertTrue("Found no indexed geo query.", query instanceof InMemoryGeoBoundingBoxQuery);
+ assertTrue("Found no indexed geo query.", query instanceof LegacyInMemoryGeoBoundingBoxQuery);
}
- } else {
+ } else if (context.indexVersionCreated().before(Version.V_5_0_0_beta1)) {
assertTrue("Found no indexed geo query.", query instanceof GeoPointInBBoxQuery);
+ } else {
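+ // LatLonPoint query classes are package private in Lucene, so only a generic Query check is possible here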
+ assertTrue("Found no indexed geo query.", query instanceof Query);
}
}
}
@@ -421,13 +425,13 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase<GeoBo
QueryShardContext shardContext = createShardContext();
Query parsedQuery = parseQuery(query).toQuery(shardContext);
if (shardContext.indexVersionCreated().before(Version.V_2_2_0)) {
- InMemoryGeoBoundingBoxQuery filter = (InMemoryGeoBoundingBoxQuery) parsedQuery;
+ LegacyInMemoryGeoBoundingBoxQuery filter = (LegacyInMemoryGeoBoundingBoxQuery) parsedQuery;
assertThat(filter.fieldName(), equalTo(GEO_POINT_FIELD_NAME));
assertThat(filter.topLeft().lat(), closeTo(40, 1E-5));
assertThat(filter.topLeft().lon(), closeTo(-70, 1E-5));
assertThat(filter.bottomRight().lat(), closeTo(30, 1E-5));
assertThat(filter.bottomRight().lon(), closeTo(-80, 1E-5));
- } else {
+ } else if (shardContext.indexVersionCreated().before(Version.V_5_0_0_beta1)) {
GeoPointInBBoxQuery q = (GeoPointInBBoxQuery) parsedQuery;
assertThat(q.getField(), equalTo(GEO_POINT_FIELD_NAME));
assertThat(q.getMaxLat(), closeTo(40, 1E-5));
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java
index 91bb90dccb..e3045a53b3 100644
--- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.query;
+import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery;
@@ -28,6 +29,7 @@ import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.geo.RandomShapeGenerator;
@@ -72,10 +74,6 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
}
if (randomBoolean()) {
- qb.optimizeBbox(randomFrom("none", "memory", "indexed"));
- }
-
- if (randomBoolean()) {
qb.geoDistance(randomFrom(GeoDistance.values()));
}
@@ -118,9 +116,6 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
e = expectThrows(IllegalArgumentException.class, () -> query.geoDistance(null));
assertEquals("geoDistance must not be null", e.getMessage());
-
- e = expectThrows(IllegalArgumentException.class, () -> query.optimizeBbox(null));
- assertEquals("optimizeBbox must not be null", e.getMessage());
}
/**
@@ -162,18 +157,21 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
}
private void assertGeoPointQuery(GeoDistanceQueryBuilder queryBuilder, Query query) throws IOException {
- assertThat(query, instanceOf(GeoPointDistanceQuery.class));
- GeoPointDistanceQuery geoQuery = (GeoPointDistanceQuery) query;
- assertThat(geoQuery.getField(), equalTo(queryBuilder.fieldName()));
- if (queryBuilder.point() != null) {
- assertThat(geoQuery.getCenterLat(), equalTo(queryBuilder.point().lat()));
- assertThat(geoQuery.getCenterLon(), equalTo(queryBuilder.point().lon()));
- }
- double distance = queryBuilder.distance();
- if (queryBuilder.geoDistance() != null) {
- distance = queryBuilder.geoDistance().normalize(distance, DistanceUnit.DEFAULT);
- distance = org.elasticsearch.common.geo.GeoUtils.maxRadialDistance(queryBuilder.point(), distance);
- assertThat(geoQuery.getRadiusMeters(), closeTo(distance, GeoUtils.TOLERANCE));
+ Version version = createShardContext().indexVersionCreated();
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ assertThat(query, instanceOf(GeoPointDistanceQuery.class));
+ GeoPointDistanceQuery geoQuery = (GeoPointDistanceQuery) query;
+ assertThat(geoQuery.getField(), equalTo(queryBuilder.fieldName()));
+ if (queryBuilder.point() != null) {
+ assertThat(geoQuery.getCenterLat(), equalTo(queryBuilder.point().lat()));
+ assertThat(geoQuery.getCenterLon(), equalTo(queryBuilder.point().lon()));
+ }
+ double distance = queryBuilder.distance();
+ if (queryBuilder.geoDistance() != null) {
+ distance = queryBuilder.geoDistance().normalize(distance, DistanceUnit.DEFAULT);
+ distance = org.elasticsearch.common.geo.GeoUtils.maxRadialDistance(queryBuilder.point(), distance);
+ assertThat(geoQuery.getRadiusMeters(), closeTo(distance, GeoUtils.TOLERANCE));
+ }
}
}
@@ -352,7 +350,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
assertThat(q.lon(), closeTo(lon, 1E-5D));
assertThat(q.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
assertThat(q.maxInclusiveDistance(), closeTo(distanceUnit.convert(distance, DistanceUnit.MILES), 1E-5D));
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
GeoPointDistanceQuery q = (GeoPointDistanceQuery) parsedQuery;
assertThat(q.getField(), equalTo(GEO_POINT_FIELD_NAME));
assertThat(q.getCenterLat(), closeTo(lat, 1E-5D));
@@ -368,7 +366,6 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
" \"pin.location\" : [ -70.0, 40.0 ],\n" +
" \"distance\" : 12000.0,\n" +
" \"distance_type\" : \"sloppy_arc\",\n" +
- " \"optimize_bbox\" : \"memory\",\n" +
" \"validation_method\" : \"STRICT\",\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
@@ -381,6 +378,23 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
assertEquals(json, 12000.0, parsed.distance(), 0.0001);
}
+ public void testOptimizeBboxFails() throws IOException {
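+ // optimize_bbox was removed, so a query that still sends it must be rejected as a deprecated field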
+ String json =
+ "{\n" +
+ " \"geo_distance\" : {\n" +
+ " \"pin.location\" : [ -70.0, 40.0 ],\n" +
+ " \"distance\" : 12000.0,\n" +
+ " \"distance_type\" : \"sloppy_arc\",\n" +
+ " \"optimize_bbox\" : \"memory\",\n" +
+ " \"validation_method\" : \"STRICT\",\n" +
+ " \"ignore_unmapped\" : false,\n" +
+ " \"boost\" : 1.0\n" +
+ " }\n" +
+ "}";
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(json));
+ assertTrue(e.getMessage().startsWith("Deprecated field "));
+ }
+
public void testFromCoerceFails() throws IOException {
String json =
"{\n" +
@@ -388,7 +402,6 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
" \"pin.location\" : [ -70.0, 40.0 ],\n" +
" \"distance\" : 12000.0,\n" +
" \"distance_type\" : \"sloppy_arc\",\n" +
- " \"optimize_bbox\" : \"memory\",\n" +
" \"coerce\" : true,\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
@@ -405,7 +418,6 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
" \"pin.location\" : [ -70.0, 40.0 ],\n" +
" \"distance\" : 12000.0,\n" +
" \"distance_type\" : \"sloppy_arc\",\n" +
- " \"optimize_bbox\" : \"memory\",\n" +
" \"ignore_malformed\" : true,\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java
index 53f3e71a0f..f47b10108e 100644
--- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java
@@ -29,6 +29,7 @@ import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
import org.elasticsearch.test.AbstractQueryTestCase;
@@ -46,7 +47,6 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
@Override
protected GeoDistanceRangeQueryBuilder doCreateTestQueryBuilder() {
- Version version = createShardContext().indexVersionCreated();
GeoDistanceRangeQueryBuilder builder;
GeoPoint randomPoint = RandomGeoGenerator.randomPointIn(random(), -180.0, -89.9, 180.0, 89.9);
if (randomBoolean()) {
@@ -106,9 +106,6 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
if (randomBoolean()) {
builder.geoDistance(randomFrom(GeoDistance.values()));
}
- if (randomBoolean() && version.before(Version.V_2_2_0)) {
- builder.optimizeBbox(randomFrom("none", "memory", "indexed"));
- }
builder.unit(fromToUnits);
if (randomBoolean()) {
builder.setValidationMethod(randomFrom(GeoValidationMethod.values()));
@@ -208,7 +205,9 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
@Override
public void testToQuery() throws IOException {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
- super.testToQuery();
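+ // geo distance range queries are not supported on LatLonPoint indices, so only run against older versions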
+ if (createShardContext().indexVersionCreated().before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ super.testToQuery();
+ }
}
public void testNullFieldName() {
@@ -245,14 +244,6 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
assertEquals("[to] must not be null", e.getMessage());
}
- public void testInvalidOptimizeBBox() {
- GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint());
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.optimizeBbox(null));
- assertEquals("optimizeBbox must not be null", e.getMessage());
- e = expectThrows(IllegalArgumentException.class, () -> builder.optimizeBbox("foo"));
- assertEquals("optimizeBbox must be one of [none, memory, indexed]", e.getMessage());
- }
-
public void testInvalidGeoDistance() {
GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint());
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.geoDistance(null));
@@ -266,6 +257,11 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
}
public void testNestedRangeQuery() throws IOException {
+ // geo distance range queries are no longer supported in 5.0; they are replaced by aggregations or sorting
+ if (createShardContext().indexVersionCreated().onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ return;
+ }
+
// create a nested geo_point type with a subfield named "geohash" (explicit testing for ISSUE #15179)
MapperService mapperService = createShardContext().getMapperService();
String nestedMapping =
@@ -306,7 +302,6 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
" \"include_upper\" : true,\n" +
" \"unit\" : \"m\",\n" +
" \"distance_type\" : \"sloppy_arc\",\n" +
- " \"optimize_bbox\" : \"memory\",\n" +
" \"validation_method\" : \"STRICT\",\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
@@ -317,6 +312,26 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
assertEquals(json, -70.0, parsed.point().lon(), 0.0001);
}
+ public void testFromJsonOptimizeBboxFails() throws IOException {
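+ // as in the geo_distance tests, the removed optimize_bbox parameter is rejected as a deprecated field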
+ String json =
+ "{\n" +
+ " \"geo_distance_range\" : {\n" +
+ " \"pin.location\" : [ -70.0, 40.0 ],\n" +
+ " \"from\" : \"200km\",\n" +
+ " \"to\" : \"400km\",\n" +
+ " \"include_lower\" : true,\n" +
+ " \"include_upper\" : true,\n" +
+ " \"unit\" : \"m\",\n" +
+ " \"distance_type\" : \"sloppy_arc\",\n" +
+ " \"optimize_bbox\" : \"memory\",\n" +
+ " \"ignore_unmapped\" : false,\n" +
+ " \"boost\" : 1.0\n" +
+ " }\n" +
+ "}";
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(json));
+ assertTrue(e.getMessage().startsWith("Deprecated field "));
+ }
+
public void testFromJsonCoerceFails() throws IOException {
String json =
"{\n" +
@@ -328,7 +343,6 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
" \"include_upper\" : true,\n" +
" \"unit\" : \"m\",\n" +
" \"distance_type\" : \"sloppy_arc\",\n" +
- " \"optimize_bbox\" : \"memory\",\n" +
" \"coerce\" : true,\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
@@ -349,7 +363,6 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
" \"include_upper\" : true,\n" +
" \"unit\" : \"m\",\n" +
" \"distance_type\" : \"sloppy_arc\",\n" +
- " \"optimize_bbox\" : \"memory\",\n" +
" \"ignore_malformed\" : true,\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
@@ -362,7 +375,9 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
@Override
public void testMustRewrite() throws IOException {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
- super.testMustRewrite();
+ if (createShardContext().indexVersionCreated().before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ super.testMustRewrite();
+ }
}
public void testIgnoreUnmapped() throws IOException {
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java
index dc518b9b76..8f28d60206 100644
--- a/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java
@@ -20,6 +20,8 @@
package org.elasticsearch.index.query;
import com.vividsolutions.jts.geom.Coordinate;
+import org.apache.lucene.document.LatLonPoint;
+import org.apache.lucene.geo.Polygon;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.search.GeoPointInPolygonQuery;
@@ -30,6 +32,7 @@ import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.search.geo.GeoPolygonQuery;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.geo.RandomShapeGenerator;
@@ -68,9 +71,10 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase<GeoPolygo
Version version = context.indexVersionCreated();
if (version.before(Version.V_2_2_0)) {
assertLegacyQuery(queryBuilder, query);
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
assertGeoPointQuery(queryBuilder, query);
}
+ // todo LatLonPointInPolygon is package private
}
private void assertLegacyQuery(GeoPolygonQueryBuilder queryBuilder, Query query) {
@@ -121,7 +125,9 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase<GeoPolygo
@Override
public void testToQuery() throws IOException {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
- super.testToQuery();
+ if (createShardContext().indexVersionCreated().before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ super.testToQuery();
+ }
}
private static List<GeoPoint> randomPolygon() {
@@ -294,7 +300,7 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase<GeoPolygo
assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
GeoPointInPolygonQuery q = (GeoPointInPolygonQuery) parsedQuery;
assertThat(q.getField(), equalTo(GEO_POINT_FIELD_NAME));
assertEquals(1, q.getPolygons().length);
@@ -310,6 +316,9 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase<GeoPolygo
assertThat(lons[2], closeTo(-90, 1E-5));
assertThat(lats[3], equalTo(lats[0]));
assertThat(lons[3], equalTo(lons[0]));
+ } else {
+ // todo LatLonPointInPolygon is package private, need a closeTo check on the query
+ // since some points can be computed from the geohash
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java
index e9e4a71190..a7af6aea30 100644
--- a/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java
@@ -24,10 +24,12 @@ import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.Version;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.query.GeohashCellQuery.Builder;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.geo.RandomShapeGenerator;
@@ -87,7 +89,10 @@ public class GeohashCellQueryBuilderTests extends AbstractQueryTestCase<Builder>
@Override
public void testToQuery() throws IOException {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
- super.testToQuery();
+ Version version = createShardContext().indexVersionCreated();
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ super.testToQuery();
+ }
}
public void testNullField() {
@@ -140,7 +145,10 @@ public class GeohashCellQueryBuilderTests extends AbstractQueryTestCase<Builder>
@Override
public void testMustRewrite() throws IOException {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
- super.testMustRewrite();
+ Version version = createShardContext().indexVersionCreated();
+ if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
+ super.testMustRewrite();
+ }
}
public void testIgnoreUnmapped() throws IOException {
diff --git a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java
index cde6500d60..5f8c847e41 100644
--- a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java
@@ -18,20 +18,6 @@
*/
package org.elasticsearch.index.query;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.not;
-import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.sameInstance;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Supplier;
-
-import static java.util.Collections.emptyList;
-
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -60,6 +46,21 @@ import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import static java.util.Collections.emptyList;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
public class InnerHitBuilderTests extends ESTestCase {
private static final int NUMBER_OF_TESTBUILDERS = 20;
@@ -221,7 +222,9 @@ public class InnerHitBuilderTests extends ESTestCase {
innerHits.setExplain(randomBoolean());
innerHits.setVersion(randomBoolean());
innerHits.setTrackScores(randomBoolean());
- innerHits.setStoredFieldNames(randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16)));
+ if (randomBoolean()) {
+ innerHits.setStoredFieldNames(randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16)));
+ }
innerHits.setDocValueFields(randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16)));
// Random script fields deduped on their field name.
Map<String, SearchSourceBuilder.ScriptField> scriptFields = new HashMap<>();
@@ -344,12 +347,14 @@ public class InnerHitBuilderTests extends ESTestCase {
HighlightBuilderTests::randomHighlighterBuilder));
break;
case 11:
- if (instance.getStoredFieldNames() == null || randomBoolean()) {
- instance.setStoredFieldNames(randomValueOtherThan(instance.getStoredFieldNames(), () -> {
- return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16));
- }));
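+ // either replace the stored field names wholesale or append one more name to the existing context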
+ if (instance.getStoredFieldsContext() == null || randomBoolean()) {
+ List<String> previous = instance.getStoredFieldsContext() == null ?
+ Collections.emptyList() : instance.getStoredFieldsContext().fieldNames();
+ List<String> newValues = randomValueOtherThan(previous,
+ () -> randomListStuff(1, 16, () -> randomAsciiOfLengthBetween(1, 16)));
+ instance.setStoredFieldNames(newValues);
} else {
- instance.getStoredFieldNames().add(randomAsciiOfLengthBetween(1, 16));
+ instance.getStoredFieldsContext().addFieldName(randomAsciiOfLengthBetween(1, 16));
}
break;
default:
@@ -373,7 +378,11 @@ public class InnerHitBuilderTests extends ESTestCase {
}
static <T> List<T> randomListStuff(int maxSize, Supplier<T> valueSupplier) {
- int size = randomIntBetween(0, maxSize);
+ return randomListStuff(0, maxSize, valueSupplier);
+ }
+
+ static <T> List<T> randomListStuff(int minSize, int maxSize, Supplier<T> valueSupplier) {
+ int size = randomIntBetween(minSize, maxSize);
List<T> list = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
list.add(valueSupplier.get());
diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
index ea6fa65ddd..9cc19928eb 100644
--- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
@@ -431,4 +431,22 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuil
e = expectThrows(ParsingException.class, () -> parseQuery(shortJson));
assertEquals("[match] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage());
}
+
+ public void testParseFailsWithTermsArray() throws Exception {
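+ // match accepts only a single query value; an array of terms must be rejected in both shapes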
+ String json1 = "{\n" +
+ " \"match\" : {\n" +
+ " \"message1\" : {\n" +
+ " \"query\" : [\"term1\", \"term2\"]\n" +
+ " }\n" +
+ " }\n" +
+ "}";
+ expectThrows(ParsingException.class, () -> parseQuery(json1));
+
+ String json2 = "{\n" +
+ " \"match\" : {\n" +
+ " \"message1\" : [\"term1\", \"term2\"]\n" +
+ " }\n" +
+ "}";
+ expectThrows(IllegalStateException.class, () -> parseQuery(json2));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java
index f122d66ebe..e6bda8cec2 100644
--- a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java
@@ -25,8 +25,10 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
import org.elasticsearch.search.internal.SearchContext;
@@ -49,6 +51,9 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBu
@Override
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
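+ // indices created before the LatLonPoint cutover still need the legacy geo_point mapping options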
+ String geoFieldMapping = (mapperService.getIndexSettings().getIndexVersionCreated()
+ .before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) ?
+ LEGACY_GEO_POINT_FIELD_MAPPING : "type=geo_point";
mapperService.merge("nested_doc", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("nested_doc",
STRING_FIELD_NAME, "type=text",
INT_FIELD_NAME, "type=integer",
@@ -56,7 +61,7 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBu
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object",
- GEO_POINT_FIELD_NAME, GEO_POINT_FIELD_MAPPING,
+ GEO_POINT_FIELD_NAME, geoFieldMapping,
"nested1", "type=nested"
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
}
diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java
new file mode 100644
index 0000000000..a39fbae176
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.script.ScriptSettings;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.test.ESTestCase;
+
+import static java.util.Collections.emptyList;
+
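+/**
+ * Tests that parse contexts created via newParseContextWithLegacyScriptLanguage pick up
+ * the legacy default script language configured on the index settings.
+ */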
+public class QueryRewriteContextTests extends ESTestCase {
+
+ public void testNewParseContextWithLegacyScriptLanguage() throws Exception {
+ String defaultLegacyScriptLanguage = randomAsciiOfLength(4);
+ IndexMetaData.Builder indexMetadata = new IndexMetaData.Builder("index");
+ indexMetadata.settings(Settings.builder().put("index.version.created", Version.CURRENT)
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ );
+ IndicesQueriesRegistry indicesQueriesRegistry = new SearchModule(Settings.EMPTY, false, emptyList()).getQueryParserRegistry();
+ IndexSettings indexSettings = new IndexSettings(indexMetadata.build(),
+ Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, defaultLegacyScriptLanguage).build());
+ QueryRewriteContext queryRewriteContext =
+ new QueryRewriteContext(indexSettings, null, null, indicesQueriesRegistry, null, null, null);
+
+ // verify that the default script language in the query parse context is equal to defaultLegacyScriptLanguage variable:
+ QueryParseContext queryParseContext =
+ queryRewriteContext.newParseContextWithLegacyScriptLanguage(XContentHelper.createParser(new BytesArray("{}")));
+ assertEquals(defaultLegacyScriptLanguage, queryParseContext.getDefaultScriptLanguage());
+
+        // verify that the script query's script language is equal to the defaultLegacyScriptLanguage variable:
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{\"script\" : {\"script\": \"return true\"}}"));
+ queryParseContext = queryRewriteContext.newParseContextWithLegacyScriptLanguage(parser);
+ ScriptQueryBuilder queryBuilder = (ScriptQueryBuilder) queryParseContext.parseInnerQueryBuilder().get();
+ assertEquals(defaultLegacyScriptLanguage, queryBuilder.script().getLang());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
index be77ba0073..ce49f18ccf 100644
--- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
@@ -26,15 +26,16 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
+import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
-import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.SynonymQuery;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
import org.elasticsearch.common.lucene.all.AllTermQuery;
import org.elasticsearch.common.unit.Fuzziness;
@@ -390,6 +391,32 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
assertThat(e.getMessage(), containsString("would result in more than 10000 states"));
}
+    public void testToQueryFuzzyQueryAutoFuzziness() throws Exception {
+ assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
+
+ int length = randomIntBetween(1, 10);
+ StringBuilder queryString = new StringBuilder();
+ for (int i = 0; i < length; i++) {
+ queryString.append("a");
+ }
+ queryString.append("~");
+
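+        // Fuzziness.AUTO derives the allowed edits from term length; the tiers below (0 edits for <= 2 chars, 1 for 3-5, 2 above) mirror that mapping.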
+ int expectedEdits;
+ if (length <= 2) {
+ expectedEdits = 0;
+ } else if (3 <= length && length <= 5) {
+ expectedEdits = 1;
+ } else {
+ expectedEdits = 2;
+ }
+
+ Query query = queryStringQuery(queryString.toString()).defaultField(STRING_FIELD_NAME).fuzziness(Fuzziness.AUTO)
+ .toQuery(createShardContext());
+ assertThat(query, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) query;
+ assertEquals(expectedEdits, fuzzyQuery.getMaxEdits());
+ }
+
public void testFuzzyNumeric() throws Exception {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
QueryStringQueryBuilder query = queryStringQuery("12~0.2").defaultField(INT_FIELD_NAME);
diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
index b4fda42177..5ee538126d 100644
--- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
@@ -501,4 +501,21 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase<RangeQueryBuil
ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));
assertEquals("[range] query doesn't support multiple fields, found [age] and [price]", e.getMessage());
}
+
+ public void testParseFailsWithMultipleFieldsWhenOneIsDate() throws IOException {
+ String json =
+ "{\n" +
+ " \"range\": {\n" +
+ " \"age\": {\n" +
+ " \"gte\": 30,\n" +
+ " \"lte\": 40\n" +
+ " },\n" +
+ " \"" + DATE_FIELD_NAME + "\": {\n" +
+ " \"gte\": \"2016-09-13 05:01:14\"\n" +
+ " }\n" +
+ " }\n" +
+ " }";
+ ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));
+ assertEquals("[range] query doesn't support multiple fields, found [age] and [" + DATE_FIELD_NAME + "]", e.getMessage());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java
index f2f04d04bb..6be23412d8 100644
--- a/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java
@@ -24,11 +24,13 @@ import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.ParsingException;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
import java.util.Iterator;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
@@ -112,8 +114,7 @@ public class SpanNearQueryBuilderTests extends AbstractQueryTestCase<SpanNearQue
assertEquals(json, false, parsed.inOrder());
}
- public void testCollectPayloadsDeprecated() throws Exception {
- assertEquals("We can remove support for ignoring collect_payloads in 6.0.0", 5, Version.CURRENT.major);
+ public void testCollectPayloadsNoLongerSupported() throws Exception {
String json =
"{\n" +
" \"span_near\" : {\n" +
@@ -146,6 +147,9 @@ public class SpanNearQueryBuilderTests extends AbstractQueryTestCase<SpanNearQue
" }\n" +
"}";
- parseQuery(json, ParseFieldMatcher.EMPTY); // Just don't throw an error and we're fine
+ final ParsingException e = expectThrows(
+ ParsingException.class,
+ () -> parseQuery(json, ParseFieldMatcher.EMPTY));
+ assertThat(e.getMessage(), containsString("[span_near] query does not support [collect_payloads]"));
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java
index 8c069a67a7..1c220f172a 100644
--- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreEquivalenceTests.java
@@ -24,6 +24,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomApproximationQuery;
import org.apache.lucene.search.SearchEquivalenceTestBase;
import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.common.lucene.search.function.CombineFunction;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery.FilterFunction;
@@ -31,6 +32,14 @@ import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
public class FunctionScoreEquivalenceTests extends SearchEquivalenceTestBase {
+ static {
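+        // Force ESTestCase's static initialization and the test bootstrap to run first, since this suite extends Lucene's SearchEquivalenceTestBase rather than ESTestCase.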
+ try {
+ Class.forName("org.elasticsearch.test.ESTestCase");
+ } catch (ClassNotFoundException e) {
+ throw new AssertionError(e);
+ }
+ BootstrapForTesting.ensureInitialized();
+ }
public void testMinScoreAllIncluded() throws Exception {
Term term = randomTerm();
diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java
index c5b31eb6a9..0eab330e36 100644
--- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java
@@ -700,7 +700,7 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase<Functi
expectParsingException(json, "field [not_supported] is not supported");
}
- public void testMalformedQuery() throws IOException {
+ public void testMalformedQueryMultipleQueryObjects() throws IOException {
//verify that an error is thrown rather than setting the query twice (https://github.com/elastic/elasticsearch/issues/16583)
String json = "{\n" +
" \"function_score\":{\n" +
@@ -715,15 +715,34 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase<Functi
" }\n" +
" }\n" +
"}";
+ expectParsingException(json, equalTo("[bool] malformed query, expected [END_OBJECT] but found [FIELD_NAME]"));
+ }
+
+ public void testMalformedQueryMultipleQueryElements() throws IOException {
+ String json = "{\n" +
+ " \"function_score\":{\n" +
+ " \"query\":{\n" +
+ " \"bool\":{\n" +
+ " \"must\":{\"match\":{\"field\":\"value\"}}" +
+ " }\n" +
+ " },\n" +
+ " \"query\":{\n" +
+ " \"bool\":{\n" +
+ " \"must\":{\"match\":{\"field\":\"value\"}}" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}";
expectParsingException(json, "[query] is already defined.");
}
- private void expectParsingException(String json, Matcher<String> messageMatcher) {
+ private static void expectParsingException(String json, Matcher<String> messageMatcher) {
ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));
assertThat(e.getMessage(), messageMatcher);
}
- private void expectParsingException(String json, String message) {
+ private static void expectParsingException(String json, String message) {
expectParsingException(json, equalTo("failed to parse [function_score] query. " + message));
}
diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index 4bfe088550..6e200c4756 100644
--- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -18,15 +18,7 @@
*/
package org.elasticsearch.index.replication;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexNotFoundException;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
@@ -36,55 +28,29 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.replication.ReplicationOperation;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.support.replication.TransportWriteAction;
+import org.elasticsearch.action.support.replication.TransportWriteActionTestHelper;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.collect.Iterators;
-import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.cache.IndexCache;
-import org.elasticsearch.index.cache.query.DisabledQueryCache;
import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Uid;
-import org.elasticsearch.index.mapper.UidFieldMapper;
-import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.ShardPath;
-import org.elasticsearch.index.similarity.SimilarityService;
-import org.elasticsearch.index.store.DirectoryService;
-import org.elasticsearch.index.store.Store;
-import org.elasticsearch.indices.recovery.RecoveryFailedException;
-import org.elasticsearch.indices.recovery.RecoverySourceHandler;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.RecoveryTarget;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
-import org.elasticsearch.indices.recovery.StartRecoveryRequest;
-import org.elasticsearch.test.DummyShardLock;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
-import org.elasticsearch.threadpool.TestThreadPool;
-import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
@@ -92,10 +58,8 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
-import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;
import java.util.function.Consumer;
@@ -105,98 +69,24 @@ import java.util.stream.StreamSupport;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
-public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
+public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase {
- protected ThreadPool threadPool;
- private final Index index = new Index("test", "uuid");
+ protected final Index index = new Index("test", "uuid");
private final ShardId shardId = new ShardId(index, 0);
private final Map<String, String> indexMapping = Collections.singletonMap("type", "{ \"type\": {} }");
- protected static final RecoveryTargetService.RecoveryListener recoveryListener = new RecoveryTargetService.RecoveryListener() {
- @Override
- public void onRecoveryDone(RecoveryState state) {
-
- }
-
- @Override
- public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
- fail(ExceptionsHelper.detailedMessage(e));
- }
- };
-
-
- @TestLogging("index.shard:TRACE,index.replication:TRACE,indices.recovery:TRACE")
- public void testIndexingDuringFileRecovery() throws Exception {
- try (ReplicationGroup shards = createGroup(randomInt(1))) {
- shards.startAll();
- int docs = shards.indexDocs(randomInt(50));
- shards.flush();
- IndexShard replica = shards.addReplica();
- final CountDownLatch recoveryBlocked = new CountDownLatch(1);
- final CountDownLatch releaseRecovery = new CountDownLatch(1);
- final Future<Void> recoveryFuture = shards.asyncRecoverReplica(replica,
- new BiFunction<IndexShard, DiscoveryNode, RecoveryTarget>() {
- @Override
- public RecoveryTarget apply(IndexShard indexShard, DiscoveryNode node) {
- return new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) {
- @Override
- public void renameAllTempFiles() throws IOException {
- super.renameAllTempFiles();
- recoveryBlocked.countDown();
- try {
- releaseRecovery.await();
- } catch (InterruptedException e) {
- throw new IOException("terminated by interrupt", e);
- }
- }
- };
- }
- });
-
- recoveryBlocked.await();
- docs += shards.indexDocs(randomInt(20));
- releaseRecovery.countDown();
- recoveryFuture.get();
-
- shards.assertAllEqual(docs);
- }
- }
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- threadPool = new TestThreadPool(getClass().getName());
- }
-
- @Override
- public void tearDown() throws Exception {
- super.tearDown();
- ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
- }
-
- private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
- final ShardId shardId = shardPath.getShardId();
- final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
- @Override
- public Directory newDirectory() throws IOException {
- return newFSDirectory(shardPath.resolveIndex());
- }
-
- @Override
- public long throttleTimeInNanos() {
- return 0;
- }
- };
- return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
- }
protected ReplicationGroup createGroup(int replicas) throws IOException {
- final Path homePath = createTempDir();
- Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build();
- IndexMetaData metaData = IndexMetaData.builder(index.getName()).settings(build).primaryTerm(0, 1).build();
- return new ReplicationGroup(metaData, homePath);
+ IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName())
+ .settings(settings)
+ .primaryTerm(0, 1);
+ for (Map.Entry<String, String> typeMapping : indexMapping.entrySet()) {
+ metaData.putMapping(typeMapping.getKey(), typeMapping.getValue());
+ }
+ return new ReplicationGroup(metaData.build());
}
protected DiscoveryNode getDiscoveryNode(String id) {
@@ -204,49 +94,22 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
}
- private IndexShard newShard(boolean primary, DiscoveryNode node, IndexMetaData indexMetaData, Path homePath) throws IOException {
- // add node name to settings for propper logging
- final Settings nodeSettings = Settings.builder().put("node.name", node.getName()).build();
- final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
- ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, node.getId(), primary, ShardRoutingState.INITIALIZING);
- final Path path = Files.createDirectories(homePath.resolve(node.getId()));
- final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path);
- ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
- Store store = createStore(indexSettings, shardPath);
- IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
- MapperService mapperService = MapperTestUtils.newMapperService(homePath, indexSettings.getSettings());
- for (Map.Entry<String, String> type : indexMapping.entrySet()) {
- mapperService.merge(type.getKey(), new CompressedXContent(type.getValue()), MapperService.MergeReason.MAPPING_RECOVERY, true);
- }
- SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
- final IndexEventListener indexEventListener = new IndexEventListener() {
- };
- final Engine.Warmer warmer = searcher -> {
- };
- return new IndexShard(shardRouting, indexSettings, shardPath, store, indexCache, mapperService, similarityService, null, null,
- indexEventListener, null, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, Collections.emptyList(),
- Collections.emptyList());
- }
-
protected class ReplicationGroup implements AutoCloseable, Iterable<IndexShard> {
private final IndexShard primary;
private final List<IndexShard> replicas;
private final IndexMetaData indexMetaData;
- private final Path homePath;
private final AtomicInteger replicaId = new AtomicInteger();
private final AtomicInteger docId = new AtomicInteger();
boolean closed = false;
- ReplicationGroup(final IndexMetaData indexMetaData, Path homePath) throws IOException {
- primary = newShard(true, getDiscoveryNode("s0"), indexMetaData, homePath);
+ ReplicationGroup(final IndexMetaData indexMetaData) throws IOException {
+ primary = newShard(shardId, true, "s0", indexMetaData, null);
replicas = new ArrayList<>();
this.indexMetaData = indexMetaData;
- this.homePath = homePath;
for (int i = 0; i < indexMetaData.getNumberOfReplicas(); i++) {
addReplica();
}
-
}
public int indexDocs(final int numOfDoc) throws Exception {
@@ -259,67 +122,49 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
return numOfDoc;
}
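+    // No explicit ids are set below, so indexing presumably exercises the auto-generated-id (append-only) path, unlike indexDocs.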
+ public int appendDocs(final int numOfDoc) throws Exception {
+ for (int doc = 0; doc < numOfDoc; doc++) {
+ final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}");
+ final IndexResponse response = index(indexRequest);
+ assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
+ }
+ return numOfDoc;
+ }
+
public IndexResponse index(IndexRequest indexRequest) throws Exception {
- PlainActionFuture<IndexingResult> listener = new PlainActionFuture<>();
- IndexingOp op = new IndexingOp(indexRequest, listener, this);
- op.execute();
- return listener.get().finalResponse;
+ PlainActionFuture<IndexResponse> listener = new PlainActionFuture<>();
+ new IndexingAction(indexRequest, listener, this).execute();
+ return listener.get();
}
public synchronized void startAll() throws IOException {
final DiscoveryNode pNode = getDiscoveryNode(primary.routingEntry().currentNodeId());
- primary.markAsRecovering("store", new RecoveryState(primary.shardId(), true, RecoveryState.Type.STORE, pNode, pNode));
+ primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), pNode, null));
primary.recoverFromStore();
primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry()));
for (IndexShard replicaShard : replicas) {
- recoverReplica(replicaShard,
- (replica, sourceNode) -> new RecoveryTarget(replica, sourceNode, recoveryListener, version -> {}));
+ recoverReplica(replicaShard);
}
}
public synchronized IndexShard addReplica() throws IOException {
- final IndexShard replica = newShard(false, getDiscoveryNode("s" + replicaId.incrementAndGet()), indexMetaData, homePath);
+ final IndexShard replica = newShard(shardId, false, "s" + replicaId.incrementAndGet(), indexMetaData, null);
replicas.add(replica);
return replica;
}
+
+ public void recoverReplica(IndexShard replica) throws IOException {
+ recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, version -> {}));
+ }
+
public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier)
throws IOException {
recoverReplica(replica, targetSupplier, true);
}
public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
- boolean markAsRecovering)
- throws IOException {
- final DiscoveryNode pNode = getPrimaryNode();
- final DiscoveryNode rNode = getDiscoveryNode(replica.routingEntry().currentNodeId());
- if (markAsRecovering) {
- replica.markAsRecovering("remote", new RecoveryState(replica.shardId(), false, RecoveryState.Type.REPLICA, pNode, rNode));
- } else {
- assertEquals(replica.state(), IndexShardState.RECOVERING);
- }
- replica.prepareForIndexRecovery();
- RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
- StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,
- getMetadataSnapshotOrEmpty(replica), RecoveryState.Type.REPLICA, 0);
- RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {},
- (int) ByteSizeUnit.MB.toKB(1), logger);
- recovery.recoverToTarget();
- recoveryTarget.markAsDone();
- replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
- }
-
- private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {
- Store.MetadataSnapshot result;
- try {
- result = replica.snapshotStoreMetadata();
- } catch (IndexNotFoundException e) {
- // OK!
- result = Store.MetadataSnapshot.EMPTY;
- } catch (IOException e) {
- logger.warn("failed read store, treating as empty", e);
- result = Store.MetadataSnapshot.EMPTY;
- }
- return result;
+ boolean markAsRecovering) throws IOException {
+ ESIndexLevelReplicationTestCase.this.recoverReplica(replica, primary, targetSupplier, markAsRecovering);
}
public synchronized DiscoveryNode getPrimaryNode() {
@@ -350,24 +195,6 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
}
}
- private Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
- shard.refresh("get_uids");
- try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
- Set<Uid> ids = new HashSet<>();
- for (LeafReaderContext leafContext : searcher.reader().leaves()) {
- LeafReader reader = leafContext.reader();
- Bits liveDocs = reader.getLiveDocs();
- for (int i = 0; i < reader.maxDoc(); i++) {
- if (liveDocs == null || liveDocs.get(i)) {
- Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME));
- ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME)));
- }
- }
- }
- return ids;
- }
- }
-
public synchronized void refresh(String source) {
for (IndexShard shard : this) {
shard.refresh(source);
@@ -389,10 +216,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
public synchronized void close() throws Exception {
if (closed == false) {
closed = true;
- for (IndexShard shard : this) {
- shard.close("eol", false);
- IOUtils.close(shard.store());
- }
+ closeShards(this);
} else {
throw new AlreadyClosedException("too bad");
}
@@ -400,7 +224,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
@Override
public Iterator<IndexShard> iterator() {
- return Iterators.<IndexShard>concat(replicas.iterator(), Collections.singleton(primary).iterator());
+ return Iterators.concat(replicas.iterator(), Collections.singleton(primary).iterator());
}
public IndexShard getPrimary() {
@@ -408,104 +232,151 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
}
}
- class IndexingOp extends ReplicationOperation<IndexRequest, IndexRequest, IndexingResult> {
-
+ abstract class ReplicationAction<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
+ Response extends ReplicationResponse> {
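+        // Test-only template: execute() runs a ReplicationOperation over the group's shards; subclasses define performOnPrimary/performOnReplica.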
+ private final Request request;
+ private ActionListener<Response> listener;
private final ReplicationGroup replicationGroup;
+ private final String opType;
+
+ public ReplicationAction(Request request, ActionListener<Response> listener,
+ ReplicationGroup group, String opType) {
+ this.request = request;
+ this.listener = listener;
+ this.replicationGroup = group;
+ this.opType = opType;
+ }
+
+ public void execute() throws Exception {
+ new ReplicationOperation<Request, ReplicaRequest, PrimaryResult>(request, new PrimaryRef(),
+ new ActionListener<PrimaryResult>() {
+ @Override
+ public void onResponse(PrimaryResult result) {
+ result.respond(listener);
+ }
- public IndexingOp(IndexRequest request, ActionListener<IndexingResult> listener, ReplicationGroup replicationGroup) {
- super(request, new PrimaryRef(replicationGroup), listener, true, new ReplicasRef(replicationGroup),
- () -> null, logger, "indexing");
- this.replicationGroup = replicationGroup;
- request.process(null, true, request.index());
- }
+ @Override
+ public void onFailure(Exception e) {
+ listener.onFailure(e);
+ }
+ }, true, new ReplicasRef(), () -> null, logger, opType) {
+ @Override
+ protected List<ShardRouting> getShards(ShardId shardId, ClusterState state) {
+ return replicationGroup.shardRoutings();
+ }
- @Override
- protected List<ShardRouting> getShards(ShardId shardId, ClusterState state) {
- return replicationGroup.shardRoutings();
- }
+ @Override
+ protected String checkActiveShardCount() {
+ return null;
+ }
- @Override
- protected String checkActiveShardCount() {
- return null;
+ @Override
+ protected Set<String> getInSyncAllocationIds(ShardId shardId, ClusterState clusterState) {
+ return replicationGroup.shardRoutings().stream().filter(ShardRouting::active).map(r -> r.allocationId().getId())
+ .collect(Collectors.toSet());
+ }
+ }.execute();
}
- }
- private static class PrimaryRef implements ReplicationOperation.Primary<IndexRequest, IndexRequest, IndexingResult> {
- final IndexShard primary;
+ protected abstract PrimaryResult performOnPrimary(IndexShard primary, Request request) throws Exception;
- private PrimaryRef(ReplicationGroup replicationGroup) {
- this.primary = replicationGroup.primary;
- }
+ protected abstract void performOnReplica(ReplicaRequest request, IndexShard replica);
- @Override
- public ShardRouting routingEntry() {
- return primary.routingEntry();
- }
+ class PrimaryRef implements ReplicationOperation.Primary<Request, ReplicaRequest, PrimaryResult> {
- @Override
- public void failShard(String message, Exception exception) {
- throw new UnsupportedOperationException();
- }
+ @Override
+ public ShardRouting routingEntry() {
+ return replicationGroup.primary.routingEntry();
+ }
- @Override
- public IndexingResult perform(IndexRequest request) throws Exception {
- TransportWriteAction.WriteResult<IndexResponse> result = TransportIndexAction.executeIndexRequestOnPrimary(request, primary,
- null);
- request.primaryTerm(primary.getPrimaryTerm());
- return new IndexingResult(request, result.getResponse());
+ @Override
+ public void failShard(String message, Exception exception) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public PrimaryResult perform(Request request) throws Exception {
+ PrimaryResult response = performOnPrimary(replicationGroup.primary, request);
+ response.replicaRequest().primaryTerm(replicationGroup.primary.getPrimaryTerm());
+ return response;
+ }
}
- }
+ class ReplicasRef implements ReplicationOperation.Replicas<ReplicaRequest> {
- private static class ReplicasRef implements ReplicationOperation.Replicas<IndexRequest> {
- private final ReplicationGroup replicationGroup;
+ @Override
+ public void performOn(
+ ShardRouting replicaRouting,
+ ReplicaRequest request,
+ ActionListener<TransportResponse.Empty> listener) {
+ try {
+ IndexShard replica = replicationGroup.replicas.stream()
+ .filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get();
+ performOnReplica(request, replica);
+ listener.onResponse(TransportResponse.Empty.INSTANCE);
+ } catch (Exception e) {
+ listener.onFailure(e);
+ }
+ }
- private ReplicasRef(ReplicationGroup replicationGroup) {
- this.replicationGroup = replicationGroup;
- }
+ @Override
+ public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
+ Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+ throw new UnsupportedOperationException();
+ }
- @Override
- public void performOn(ShardRouting replicaRouting, IndexRequest request, ActionListener<TransportResponse.Empty> listener) {
- try {
- IndexShard replica = replicationGroup.replicas.stream()
- .filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get();
- TransportIndexAction.executeIndexRequestOnReplica(request, replica);
- listener.onResponse(TransportResponse.Empty.INSTANCE);
- } catch (Exception t) {
- listener.onFailure(t);
+ @Override
+ public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
+ Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+ throw new UnsupportedOperationException();
}
}
- @Override
- public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
- Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
- throw new UnsupportedOperationException();
- }
- }
+ class PrimaryResult implements ReplicationOperation.PrimaryResult<ReplicaRequest> {
+ final ReplicaRequest replicaRequest;
+ final Response finalResponse;
+ public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) {
+ this.replicaRequest = replicaRequest;
+ this.finalResponse = finalResponse;
+ }
- private static class IndexingResult implements ReplicationOperation.PrimaryResult<IndexRequest> {
- final IndexRequest replicaRequest;
- final IndexResponse finalResponse;
+ @Override
+ public ReplicaRequest replicaRequest() {
+ return replicaRequest;
+ }
- public IndexingResult(IndexRequest replicaRequest, IndexResponse finalResponse) {
- this.replicaRequest = replicaRequest;
- this.finalResponse = finalResponse;
+ @Override
+ public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) {
+ finalResponse.setShardInfo(shardInfo);
+ }
+
+ public void respond(ActionListener<Response> listener) {
+ listener.onResponse(finalResponse);
+ }
}
+ }
- @Override
- public IndexRequest replicaRequest() {
- return replicaRequest;
+ class IndexingAction extends ReplicationAction<IndexRequest, IndexRequest, IndexResponse> {
+
+ public IndexingAction(IndexRequest request, ActionListener<IndexResponse> listener, ReplicationGroup replicationGroup) {
+ super(request, listener, replicationGroup, "indexing");
+ request.process(null, true, request.index());
}
@Override
- public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) {
- finalResponse.setShardInfo(shardInfo);
+ protected PrimaryResult performOnPrimary(IndexShard primary, IndexRequest request) throws Exception {
+ TransportWriteAction.WriteResult<IndexResponse> result = TransportIndexAction.executeIndexRequestOnPrimary(request, primary,
+ null);
+ request.primaryTerm(primary.getPrimaryTerm());
+ TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.getLocation(), logger);
+ return new PrimaryResult(request, result.getResponse());
}
- public void respond(ActionListener<IndexResponse> listener) {
- listener.onResponse(finalResponse);
+ @Override
+ protected void performOnReplica(IndexRequest request, IndexShard replica) {
+ Engine.Index index = TransportIndexAction.executeIndexRequestOnReplica(request, replica);
+ TransportWriteActionTestHelper.performPostWriteActions(replica, request, index.getTranslogLocation(), logger);
}
}
-
}
diff --git a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
index c6d7878406..407b374a92 100644
--- a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
@@ -18,6 +18,22 @@
*/
package org.elasticsearch.index.replication;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.InternalEngine;
+import org.elasticsearch.index.engine.InternalEngineTests;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardTests;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Future;
+
public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase {
public void testSimpleReplication() throws Exception {
@@ -28,4 +44,81 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
shards.assertAllEqual(docCount);
}
}
+
+ public void testSimpleAppendOnlyReplication() throws Exception {
+ try (ReplicationGroup shards = createGroup(randomInt(2))) {
+ shards.startAll();
+ final int docCount = randomInt(50);
+ shards.appendDocs(docCount);
+ shards.assertAllEqual(docCount);
+ }
+ }
+
+ public void testAppendWhileRecovering() throws Exception {
+ try (ReplicationGroup shards = createGroup(0)) {
+ shards.startAll();
+ IndexShard replica = shards.addReplica();
+ CountDownLatch latch = new CountDownLatch(2);
+ int numDocs = randomIntBetween(100, 200);
+            shards.appendDocs(1); // just append one to the translog so we can assert below
+ Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.countDown();
+ latch.await();
+                        shards.appendDocs(numDocs - 1);
+ } catch (Exception e) {
+ throw new AssertionError(e);
+ }
+ }
+ };
+ thread.start();
+ Future<Void> future = shards.asyncRecoverReplica(replica, (indexShard, node)
+ -> new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) {
+ @Override
+ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
+ super.cleanFiles(totalTranslogOps, sourceMetaData);
+ latch.countDown();
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ }
+ });
+ future.get();
+ thread.join();
+ shards.assertAllEqual(numDocs);
+ Engine engine = IndexShardTests.getEngineFromShard(replica);
+ assertEquals("expected at no version lookups ", InternalEngineTests.getNumVersionLookups((InternalEngine) engine), 0);
+ for (IndexShard shard : shards) {
+ engine = IndexShardTests.getEngineFromShard(shard);
+ assertEquals(0, InternalEngineTests.getNumIndexVersionsLookups((InternalEngine) engine));
+ assertEquals(0, InternalEngineTests.getNumVersionLookups((InternalEngine) engine));
+ }
+ }
+ }
+
+ public void testInheritMaxValidAutoIDTimestampOnRecovery() throws Exception {
+ try (ReplicationGroup shards = createGroup(0)) {
+ shards.startAll();
+ final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}");
+ indexRequest.onRetry(); // force an update of the timestamp
+ final IndexResponse response = shards.index(indexRequest);
+ assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
+            if (randomBoolean()) { // let's check if that also happens if no translog record is replicated
+ shards.flush();
+ }
+ IndexShard replica = shards.addReplica();
+ shards.recoverReplica(replica);
+
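+            // The retry above should have recorded a max unsafe auto-id timestamp on the primary; recovery is expected to propagate it to the replica.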
+ SegmentsStats segmentsStats = replica.segmentStats(false);
+ SegmentsStats primarySegmentStats = shards.getPrimary().segmentStats(false);
+ assertNotEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, primarySegmentStats.getMaxUnsafeAutoIdTimestamp());
+ assertEquals(primarySegmentStats.getMaxUnsafeAutoIdTimestamp(), segmentsStats.getMaxUnsafeAutoIdTimestamp());
+ assertNotEquals(Long.MAX_VALUE, segmentsStats.getMaxUnsafeAutoIdTimestamp());
+ }
+ }
+
}
diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index 3e26e3018b..371764acc9 100644
--- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -18,14 +18,14 @@
*/
package org.elasticsearch.index.replication;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.RecoveryTarget;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
import java.io.IOException;
import java.util.EnumSet;
@@ -62,10 +62,10 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
private final RecoveryState.Stage stageToBlock;
public static final EnumSet<RecoveryState.Stage> SUPPORTED_STAGES =
EnumSet.of(RecoveryState.Stage.INDEX, RecoveryState.Stage.TRANSLOG, RecoveryState.Stage.FINALIZE);
- private final ESLogger logger;
+ private final Logger logger;
BlockingTarget(RecoveryState.Stage stageToBlock, CountDownLatch recoveryBlocked, CountDownLatch releaseRecovery, IndexShard shard,
- DiscoveryNode sourceNode, RecoveryTargetService.RecoveryListener listener, ESLogger logger) {
+ DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, Logger logger) {
super(shard, sourceNode, listener, version -> {});
this.recoveryBlocked = recoveryBlocked;
this.releaseRecovery = releaseRecovery;
diff --git a/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java b/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java
index 8821f0b9e7..c723538c83 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java
@@ -19,7 +19,7 @@
package org.elasticsearch.index.shard;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ESTestCase;
@@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.nullValue;
*
*/
public class CommitPointsTests extends ESTestCase {
- private final ESLogger logger = Loggers.getLogger(CommitPointsTests.class);
+ private final Logger logger = Loggers.getLogger(CommitPointsTests.class);
public void testCommitPointXContent() throws Exception {
ArrayList<CommitPoint.FileInfo> indexFiles = new ArrayList<>();
diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
new file mode 100644
index 0000000000..fc943bcebe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -0,0 +1,476 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.shard;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.InternalClusterInfoService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.env.ShardLock;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.mapper.Mapping;
+import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.UidFieldMapper;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.test.InternalSettingsPlugin;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.emptySet;
+import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
+import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+public class IndexShardIT extends ESSingleNodeTestCase {
+
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
+ }
+
+ private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl,
+ ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
+ Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field versionField = new NumericDocValuesField("_version", 0);
+ document.add(uidField);
+ document.add(versionField);
+ return new ParsedDocument(versionField, id, type, routing, timestamp, ttl, Collections.singletonList(document), source,
+ mappingUpdate);
+ }
+
+ public void testLockTryingToDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
+
+ ClusterService cs = getInstanceFromNode(ClusterService.class);
+ final Index index = cs.state().metaData().index("test").getIndex();
+ Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
+ logger.info("--> paths: [{}]", (Object)shardPaths);
+ // Should not be able to acquire the lock because it's already open
+ try {
+ NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths);
+ fail("should not have been able to acquire the lock");
+ } catch (LockObtainFailedException e) {
+ assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
+ }
+ // Test without the regular shard lock to assume we can acquire it
+ // (worst case, meaning that the shard lock could be acquired and
+ // we're green to delete the shard's directory)
+ ShardLock sLock = new DummyShardLock(new ShardId(index, 0));
+ try {
+ env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY));
+ fail("should not have been able to delete the directory");
+ } catch (LockObtainFailedException e) {
+ assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
+ }
+ }
+
+ public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ client().prepareIndex("test", "test").setSource("{}").get();
+ ensureGreen("test");
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
+ assertBusy(() -> {
+ IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test");
+ assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
+ }
+ );
+ IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+
+ public void testDurableFlagHasEffect() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "bar", "1").setSource("{}").get();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService test = indicesService.indexService(resolveIndex("test"));
+ IndexShard shard = test.getShardOrNull(0);
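+        // With REQUEST durability the translog is synced per operation (no sync pending); ASYNC defers it, leaving syncNeeded() true.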
+ setDurability(shard, Translog.Durability.REQUEST);
+ assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
+ setDurability(shard, Translog.Durability.ASYNC);
+ client().prepareIndex("test", "bar", "2").setSource("{}").get();
+ assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
+ setDurability(shard, Translog.Durability.REQUEST);
+ client().prepareDelete("test", "bar", "1").get();
+ assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
+
+ setDurability(shard, Translog.Durability.ASYNC);
+ client().prepareDelete("test", "bar", "2").get();
+ assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
+ setDurability(shard, Translog.Durability.REQUEST);
+ assertNoFailures(client().prepareBulk()
+ .add(client().prepareIndex("test", "bar", "3").setSource("{}"))
+ .add(client().prepareDelete("test", "bar", "1")).get());
+ assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
+
+ setDurability(shard, Translog.Durability.ASYNC);
+ assertNoFailures(client().prepareBulk()
+ .add(client().prepareIndex("test", "bar", "4").setSource("{}"))
+ .add(client().prepareDelete("test", "bar", "3")).get());
+ setDurability(shard, Translog.Durability.REQUEST);
+ assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
+ }
+
+ private void setDurability(IndexShard shard, Translog.Durability durability) {
+ client().admin().indices().prepareUpdateSettings(shard.shardId().getIndexName()).setSettings(
+ Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build()).get();
+ assertEquals(durability, shard.getTranslogDurability());
+ }
+
+ public void testUpdatePriority() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(IndexMetaData.SETTING_PRIORITY, 200));
+ IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
+ assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400)
+ .build()).get();
+ assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
+ }
+
+ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
+ Environment env = getInstanceFromNode(Environment.class);
+ Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
+ logger.info("--> idxPath: [{}]", idxPath);
+ Settings idxSettings = Settings.builder()
+ .put(IndexMetaData.SETTING_DATA_PATH, idxPath)
+ .build();
+ createIndex("test", idxSettings);
+ ensureGreen("test");
+ client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
+ SearchResponse response = client().prepareSearch("test").get();
+ assertHitCount(response, 1L);
+ client().admin().indices().prepareDelete("test").get();
+ assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
+ assertPathHasBeenCleared(idxPath);
+ }
+
+ public void testExpectedShardSizeIsPresent() throws InterruptedException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ for (int i = 0; i < 50; i++) {
+ client().prepareIndex("test", "test").setSource("{}").get();
+ }
+ ensureGreen("test");
+ InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
+ clusterInfoService.refresh();
+ ClusterState state = getInstanceFromNode(ClusterService.class).state();
+ Long test = clusterInfoService.getClusterInfo().getShardSize(state.getRoutingTable().index("test")
+ .getShards().get(0).primaryShard());
+ assertNotNull(test);
+ assertTrue(test > 0);
+ }
+
+ public void testIndexCanChangeCustomDataPath() throws Exception {
+ Environment env = getInstanceFromNode(Environment.class);
+ Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
+ final String INDEX = "idx";
+ Path startDir = idxPath.resolve("start-" + randomAsciiOfLength(10));
+ Path endDir = idxPath.resolve("end-" + randomAsciiOfLength(10));
+ logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString());
+ logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString());
+ // temp dirs are automatically created, but the end dir is what
+ // startDir is going to be renamed as, so it needs to be deleted
+ // otherwise we get all sorts of errors about the directory
+ // already existing
+ IOUtils.rm(endDir);
+
+ Settings sb = Settings.builder()
+ .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString())
+ .build();
+ Settings sb2 = Settings.builder()
+ .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString())
+ .build();
+
+ logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString());
+ createIndex(INDEX, sb);
+ ensureGreen(INDEX);
+ client().prepareIndex(INDEX, "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
+
+ SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
+ assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
+
+ logger.info("--> closing the index [{}]", INDEX);
+ client().admin().indices().prepareClose(INDEX).get();
+ logger.info("--> index closed, re-opening...");
+ client().admin().indices().prepareOpen(INDEX).get();
+ logger.info("--> index re-opened");
+ ensureGreen(INDEX);
+
+ resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
+ assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
+
+ // Now, try closing and changing the settings
+
+ logger.info("--> closing the index [{}]", INDEX);
+ client().admin().indices().prepareClose(INDEX).get();
+
+ logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName());
+ assert Files.exists(endDir) == false : "end directory should not exist!";
+ Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING);
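+ // the shard data now lives under endDir, but the index settings still point at startDir until updated below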
+
+ logger.info("--> updating settings...");
+ client().admin().indices().prepareUpdateSettings(INDEX)
+ .setSettings(sb2)
+ .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
+ .get();
+
+ assert Files.exists(startDir) == false : "start dir shouldn't exist";
+
+ logger.info("--> settings updated and files moved, re-opening index");
+ client().admin().indices().prepareOpen(INDEX).get();
+ logger.info("--> index re-opened");
+ ensureGreen(INDEX);
+
+ resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
+ assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
+
+ assertAcked(client().admin().indices().prepareDelete(INDEX));
+ assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
+ assertPathHasBeenCleared(startDir.toAbsolutePath());
+ assertPathHasBeenCleared(endDir.toAbsolutePath());
+ }
+
+ public void testMaybeFlush() throws Exception {
+ createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)
+ .build());
+ ensureGreen();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService test = indicesService.indexService(resolveIndex("test"));
+ IndexShard shard = test.getShardOrNull(0);
+ assertFalse(shard.shouldFlush());
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
+ .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
+ new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
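+ // with a 133-byte threshold, a single translog operation is enough to trigger a flush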
+ client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
+ assertFalse(shard.shouldFlush());
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(),
+ new BytesArray(new byte[]{1}), null);
+ Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
+ shard.index(index);
+ assertTrue(shard.shouldFlush());
+ assertEquals(2, shard.getEngine().getTranslog().totalOperations());
+ client().prepareIndex("test", "test", "2").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
+ assertBusy(() -> { // this is async
+ assertFalse(shard.shouldFlush());
+ });
+ assertEquals(0, shard.getEngine().getTranslog().totalOperations());
+ shard.getEngine().getTranslog().sync();
+ long size = shard.getEngine().getTranslog().sizeInBytes();
+ logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
+ shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
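+ // raise the threshold to the current translog size so the delete below trips it again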
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(
+ IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
+ .build()).get();
+ client().prepareDelete("test", "test", "2").get();
+ logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
+ shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
+ assertBusy(() -> { // this is async
+ logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
+ shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
+ assertFalse(shard.shouldFlush());
+ });
+ assertEquals(0, shard.getEngine().getTranslog().totalOperations());
+ }
+
+ public void testStressMaybeFlush() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService test = indicesService.indexService(resolveIndex("test"));
+ final IndexShard shard = test.getShardOrNull(0);
+ assertFalse(shard.shouldFlush());
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(
+ IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
+ new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
+ client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
+ assertFalse(shard.shouldFlush());
+ final AtomicBoolean running = new AtomicBoolean(true);
+ final int numThreads = randomIntBetween(2, 4);
+ Thread[] threads = new Thread[numThreads];
+ CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
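+ // the extra barrier slot lets the main thread release all flush threads at once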
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ while (running.get()) {
+ shard.maybeFlush();
+ }
+ }
+ };
+ threads[i].start();
+ }
+ barrier.await();
+ FlushStats flushStats = shard.flushStats();
+ long total = flushStats.getTotal();
+ client().prepareIndex("test", "test", "1").setSource("{}").get();
+ assertBusy(() -> assertEquals(total + 1, shard.flushStats().getTotal()));
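+ // exactly one additional flush is expected once the indexed op trips the threshold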
+ running.set(false);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+ assertEquals(total + 1, shard.flushStats().getTotal());
+ }
+
+ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable {
+ createIndex("test");
+ ensureGreen();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService indexService = indicesService.indexService(resolveIndex("test"));
+ IndexShard shard = indexService.getShardOrNull(0);
+ client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get();
+ client().prepareDelete("test", "test", "0").get();
+ client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
+
+ IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {};
+ shard.close("simon says", false);
+ AtomicReference<IndexShard> shardRef = new AtomicReference<>();
+ List<Exception> failures = new ArrayList<>();
+ IndexingOperationListener listener = new IndexingOperationListener() {
+
+ @Override
+ public void postIndex(Engine.Index index, boolean created) {
+ try {
+ assertNotNull(shardRef.get());
+ // this is all the IndexingMemoryController (IMC) needs to do - check current memory and refresh
+ assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
+ shardRef.get().refresh("test");
+ } catch (Exception e) {
+ failures.add(e);
+ throw e;
+ }
+ }
+
+
+ @Override
+ public void postDelete(Engine.Delete delete) {
+ try {
+ assertNotNull(shardRef.get());
+ // this is all the IndexingMemoryController (IMC) needs to do - check current memory and refresh
+ assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
+ shardRef.get().refresh("test");
+ } catch (Exception e) {
+ failures.add(e);
+ throw e;
+ }
+ }
+ };
+ final IndexShard newShard = newIndexShard(indexService, shard, wrapper, listener);
+ shardRef.set(newShard);
+ recoverShard(newShard);
+
+ try {
+ ExceptionsHelper.rethrowAndSuppress(failures);
+ } finally {
+ newShard.close("just do it", randomBoolean());
+ }
+ }
+
+
+ public static final IndexShard recoverShard(IndexShard newShard) throws IOException {
+ DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
+ newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
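+ // recover from the local store, then promote the routing entry so the shard ends up STARTED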
+ assertTrue(newShard.recoverFromStore());
+ newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
+ return newShard;
+ }
+
+ public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper,
+ IndexingOperationListener... listeners) throws IOException {
+ ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
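+ // build a fresh IndexShard over the old shard's store and path, wiring in the given wrapper and listeners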
+ IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(),
+ shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(),
+ indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
+ indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners));
+ return newShard;
+ }
+
+ private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) {
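+ // rewind the routing entry to INITIALIZING with an existing-store recovery source so recovery can run again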
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(),
+ existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING,
+ existingShardRouting.allocationId());
+ shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"),
+ RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE);
+ return shardRouting;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index d10deaa90f..b5d3d69705 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.index.shard;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.CorruptIndexException;
@@ -29,66 +30,43 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.Constants;
-import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
-import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
-import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.TransportIndexAction;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.PlainActionFuture;
-import org.elasticsearch.cluster.ClusterInfoService;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.AllocationId;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
-import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.env.ShardLock;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.fielddata.IndexFieldData;
-import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParseContext;
@@ -98,9 +76,10 @@ import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.index.translog.TranslogTests;
+import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
@@ -109,20 +88,14 @@ import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.elasticsearch.test.DummyShardLock;
-import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.FieldMaskingReader;
-import org.elasticsearch.test.IndexSettingsModule;
-import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
-import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@@ -133,6 +106,7 @@ import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -141,32 +115,33 @@ import java.util.function.BiConsumer;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
-import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
-import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE;
-import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
-import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
-import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex;
import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.nullValue;
/**
* Simple unit tests of IndexShard-related operations.
*/
-public class IndexShardTests extends ESSingleNodeTestCase {
+public class IndexShardTests extends IndexShardTestCase {
- @Override
- protected Collection<Class<? extends Plugin>> getPlugins() {
- return pluginList(InternalSettingsPlugin.class);
+ public static ShardStateMetaData load(Logger logger, Path... shardPaths) throws IOException {
+ return ShardStateMetaData.FORMAT.loadLatestState(logger, shardPaths);
+ }
+
+ public static void write(ShardStateMetaData shardStateMetaData,
+ Path... shardPaths) throws IOException {
+ ShardStateMetaData.FORMAT.write(shardStateMetaData, shardPaths);
+ }
+
+ public static Engine getEngineFromShard(IndexShard shard) {
+ return shard.getEngineOrNull();
}
public void testWriteShardState() throws Exception {
@@ -193,73 +168,41 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
}
- public void testLockTryingToDelete() throws Exception {
- createIndex("test");
- ensureGreen();
- NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
-
- ClusterService cs = getInstanceFromNode(ClusterService.class);
- final Index index = cs.state().metaData().index("test").getIndex();
- Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
- logger.info("--> paths: [{}]", (Object)shardPaths);
- // Should not be able to acquire the lock because it's already open
- try {
- NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths);
- fail("should not have been able to acquire the lock");
- } catch (LockObtainFailedException e) {
- assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
- }
- // Test without the regular shard lock to assume we can acquire it
- // (worst case, meaning that the shard lock could be acquired and
- // we're green to delete the shard's directory)
- ShardLock sLock = new DummyShardLock(new ShardId(index, 0));
- try {
- env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY));
- fail("should not have been able to delete the directory");
- } catch (LockObtainFailedException e) {
- assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
- }
- }
-
public void testPersistenceStateMetadataPersistence() throws Exception {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = test.getShardOrNull(0);
- ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ IndexShard shard = newStartedShard();
+ final Path shardStatePath = shard.shardPath().getShardStatePath();
+ ShardStateMetaData shardStateMetaData = load(logger, shardStatePath);
assertEquals(getShardStateMetadata(shard), shardStateMetaData);
ShardRouting routing = shard.shardRouting;
shard.updateRoutingEntry(routing);
- shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ shardStateMetaData = load(logger, shardStatePath);
assertEquals(shardStateMetaData, getShardStateMetadata(shard));
- assertEquals(shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));
+ assertEquals(shardStateMetaData,
+ new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));
routing = TestShardRouting.relocate(shard.shardRouting, "some node", 42L);
shard.updateRoutingEntry(routing);
- shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ shardStateMetaData = load(logger, shardStatePath);
assertEquals(shardStateMetaData, getShardStateMetadata(shard));
- assertEquals(shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));
+ assertEquals(shardStateMetaData,
+ new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));
+ closeShards(shard);
}
public void testFailShard() throws Exception {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = test.getShardOrNull(0);
+ IndexShard shard = newStartedShard();
+ final ShardPath shardPath = shard.shardPath();
+ assertNotNull(shardPath);
// fail shard
shard.failShard("test shard fail", new CorruptIndexException("", ""));
+ closeShards(shard);
// check state file still exists
- ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ ShardStateMetaData shardStateMetaData = load(logger, shardPath.getShardStatePath());
assertEquals(shardStateMetaData, getShardStateMetadata(shard));
- ShardPath shardPath = ShardPath.loadShardPath(logger, env, shard.shardId(), test.getIndexSettings());
- assertNotNull(shardPath);
// but index can't be opened for a failed shard
- assertThat("store index should be corrupted", Store.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(), env::shardLock),
+ assertThat("store index should be corrupted", Store.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(),
+ (shardId, lockTimeoutMS) -> new DummyShardLock(shardId)),
equalTo(false));
}
@@ -282,10 +225,12 @@ public class IndexShardTests extends ESSingleNodeTestCase {
public void testShardStateMetaHashCodeEquals() {
AllocationId allocationId = randomBoolean() ? null : randomAllocationId();
- ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId);
+ ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(),
+ randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId);
assertEquals(meta, new ShardStateMetaData(meta.legacyVersion, meta.primary, meta.indexUUID, meta.allocationId));
- assertEquals(meta.hashCode(), new ShardStateMetaData(meta.legacyVersion, meta.primary, meta.indexUUID, meta.allocationId).hashCode());
+ assertEquals(meta.hashCode(),
+ new ShardStateMetaData(meta.legacyVersion, meta.primary, meta.indexUUID, meta.allocationId).hashCode());
assertFalse(meta.equals(new ShardStateMetaData(meta.legacyVersion, !meta.primary, meta.indexUUID, meta.allocationId)));
assertFalse(meta.equals(new ShardStateMetaData(meta.legacyVersion + 1, meta.primary, meta.indexUUID, meta.allocationId)));
@@ -294,20 +239,17 @@ public class IndexShardTests extends ESSingleNodeTestCase {
Set<Integer> hashCodes = new HashSet<>();
for (int i = 0; i < 30; i++) { // just a sanity check that we implement hashCode
allocationId = randomBoolean() ? null : randomAllocationId();
- meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId);
+ meta = new ShardStateMetaData(randomLong(), randomBoolean(),
+ randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId);
hashCodes.add(meta.hashCode());
}
assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1);
}
- public void testDeleteIndexPreventsNewOperations() throws InterruptedException, ExecutionException, IOException {
- assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
- ensureGreen("test");
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
- IndexShard indexShard = indexService.getShardOrNull(0);
- client().admin().indices().prepareDelete("test").get();
+ public void testClosesPreventsNewOperations() throws InterruptedException, ExecutionException, IOException {
+ IndexShard indexShard = newStartedShard();
+ closeShards(indexShard);
assertThat(indexShard.getActiveOperationsCount(), equalTo(0));
try {
indexShard.acquirePrimaryOperationLock(null, ThreadPool.Names.INDEX);
@@ -324,35 +266,27 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
public void testOperationLocksOnPrimaryShards() throws InterruptedException, ExecutionException, IOException {
- assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
- ensureGreen("test");
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
- IndexShard indexShard = indexService.getShardOrNull(0);
- long primaryTerm = indexShard.getPrimaryTerm();
-
- ShardRouting temp = indexShard.routingEntry();
- final ShardRouting newPrimaryShardRouting;
+ final ShardId shardId = new ShardId("test", "_na_", 0);
+ final IndexShard indexShard;
+
if (randomBoolean()) {
// relocation target
- newPrimaryShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), "other node",
- true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(temp.allocationId()));
+ indexShard = newShard(TestShardRouting.newShardRouting(shardId, "local_node", "other node",
+ true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing())));
} else if (randomBoolean()) {
// simulate promotion
- ShardRouting newReplicaShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), null,
- false, ShardRoutingState.STARTED, temp.allocationId());
- indexShard.updateRoutingEntry(newReplicaShardRouting);
- primaryTerm = primaryTerm + 1;
- indexShard.updatePrimaryTerm(primaryTerm);
- newPrimaryShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), null,
- true, ShardRoutingState.STARTED, temp.allocationId());
+ indexShard = newShard(shardId, false);
+ ShardRouting replicaRouting = indexShard.routingEntry();
+ indexShard.updatePrimaryTerm(indexShard.getPrimaryTerm() + 1);
+ ShardRouting primaryRouting = TestShardRouting.newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null,
+ true, ShardRoutingState.STARTED, replicaRouting.allocationId());
+ indexShard.updateRoutingEntry(primaryRouting);
} else {
- newPrimaryShardRouting = temp;
+ indexShard = newStartedShard(true);
}
- indexShard.updateRoutingEntry(newPrimaryShardRouting);
-
+ final long primaryTerm = indexShard.getPrimaryTerm();
assertEquals(0, indexShard.getActiveOperationsCount());
- if (newPrimaryShardRouting.isRelocationTarget() == false) {
+ if (indexShard.routingEntry().isRelocationTarget() == false) {
try {
indexShard.acquireReplicaOperationLock(primaryTerm, null, ThreadPool.Names.INDEX);
fail("shard shouldn't accept operations as replica");
@@ -367,6 +301,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
Releasables.close(operation1, operation2);
assertEquals(0, indexShard.getActiveOperationsCount());
+
+ closeShards(indexShard);
}
private Releasable acquirePrimaryOperationLockBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException {
@@ -375,56 +311,52 @@ public class IndexShardTests extends ESSingleNodeTestCase {
return fut.get();
}
- private Releasable acquireReplicaOperationLockBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException {
+ private Releasable acquireReplicaOperationLockBlockingly(IndexShard indexShard, long opPrimaryTerm)
+ throws ExecutionException, InterruptedException {
PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
indexShard.acquireReplicaOperationLock(opPrimaryTerm, fut, ThreadPool.Names.INDEX);
return fut.get();
}
public void testOperationLocksOnReplicaShards() throws InterruptedException, ExecutionException, IOException {
- assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
- ensureGreen("test");
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
- IndexShard indexShard = indexService.getShardOrNull(0);
- long primaryTerm = indexShard.getPrimaryTerm();
-
- // ugly hack to allow the shard to operate as a replica
- final ShardRouting temp = indexShard.routingEntry();
- final ShardRouting newShardRouting;
+ final ShardId shardId = new ShardId("test", "_na_", 0);
+ final IndexShard indexShard;
+
switch (randomInt(2)) {
case 0:
// started replica
- newShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), null,
- false, ShardRoutingState.STARTED, AllocationId.newRelocation(temp.allocationId()));
-
- indexShard.updateRoutingEntry(newShardRouting);
+ indexShard = newStartedShard(false);
break;
- case 1:
+ case 1: {
// initializing replica / primary
final boolean relocating = randomBoolean();
- newShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(),
+ ShardRouting routing = TestShardRouting.newShardRouting(shardId, "local_node",
relocating ? "sourceNode" : null,
relocating ? randomBoolean() : false,
ShardRoutingState.INITIALIZING,
- relocating ? AllocationId.newRelocation(temp.allocationId()) : temp.allocationId());
- indexShard.updateRoutingEntry(newShardRouting);
+ relocating ? AllocationId.newRelocation(AllocationId.newInitializing()) : AllocationId.newInitializing());
+ indexShard = newShard(routing);
break;
- case 2:
+ }
+ case 2: {
// relocation source
- newShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), "otherNode",
- false, ShardRoutingState.RELOCATING, AllocationId.newRelocation(temp.allocationId()));
- indexShard.updateRoutingEntry(newShardRouting);
+ indexShard = newStartedShard(true);
+ ShardRouting routing = indexShard.routingEntry();
+ routing = TestShardRouting.newShardRouting(routing.shardId(), routing.currentNodeId(), "otherNode",
+ true, ShardRoutingState.RELOCATING, AllocationId.newRelocation(routing.allocationId()));
+ indexShard.updateRoutingEntry(routing);
indexShard.relocated("test");
break;
+ }
default:
throw new UnsupportedOperationException("get your numbers straight");
}
- logger.info("updated shard routing to {}", newShardRouting);
+ final ShardRouting shardRouting = indexShard.routingEntry();
+ logger.info("shard routing to {}", shardRouting);
assertEquals(0, indexShard.getActiveOperationsCount());
- if (newShardRouting.primary() == false) {
+ if (shardRouting.primary() == false) {
try {
indexShard.acquirePrimaryOperationLock(null, ThreadPool.Names.INDEX);
fail("shard shouldn't accept primary ops");
@@ -433,6 +365,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
}
+ final long primaryTerm = indexShard.getPrimaryTerm();
+
Releasable operation1 = acquireReplicaOperationLockBlockingly(indexShard, primaryTerm);
assertEquals(1, indexShard.getActiveOperationsCount());
Releasable operation2 = acquireReplicaOperationLockBlockingly(indexShard, primaryTerm);
@@ -450,84 +384,51 @@ public class IndexShardTests extends ESSingleNodeTestCase {
acquireReplicaOperationLockBlockingly(indexShard, primaryTerm + 1 + randomInt(20)).close();
Releasables.close(operation1, operation2);
assertEquals(0, indexShard.getActiveOperationsCount());
- }
-
- public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
- assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
- client().prepareIndex("test", "test").setSource("{}").get();
- ensureGreen("test");
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
- assertBusy(() -> {
- IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test");
- assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
- indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
- }
- );
- IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
- assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
- }
- public static ShardStateMetaData load(ESLogger logger, Path... shardPaths) throws IOException {
- return ShardStateMetaData.FORMAT.loadLatestState(logger, shardPaths);
- }
-
- public static void write(ShardStateMetaData shardStateMetaData,
- Path... shardPaths) throws IOException {
- ShardStateMetaData.FORMAT.write(shardStateMetaData, shardPaths);
+ closeShards(indexShard);
}
public void testAcquireIndexCommit() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
+ final IndexShard shard = newStartedShard();
int numDocs = randomInt(20);
for (int i = 0; i < numDocs; i++) {
- client().prepareIndex("test", "type", "id_" + i).setSource("{}").get();
+ indexDoc(shard, "type", "id_" + i);
}
final boolean flushFirst = randomBoolean();
IndexCommit commit = shard.acquireIndexCommit(flushFirst);
int moreDocs = randomInt(20);
for (int i = 0; i < moreDocs; i++) {
- client().prepareIndex("test", "type", "id_" + numDocs + i).setSource("{}").get();
+ indexDoc(shard, "type", "id_" + numDocs + i);
}
- shard.flush(new FlushRequest("index"));
+ flushShard(shard);
// check that we can still read the commit that we captured
try (IndexReader reader = DirectoryReader.open(commit)) {
assertThat(reader.numDocs(), equalTo(flushFirst ? numDocs : 0));
}
shard.releaseIndexCommit(commit);
- shard.flush(new FlushRequest("index").force(true));
+ flushShard(shard, true);
+
// check it's cleaned up
assertThat(DirectoryReader.listCommits(shard.store().directory()), hasSize(1));
+
+ closeShards(shard);
}
/**
* test one can snapshot the store at various lifecycle stages
*/
public void testSnapshotStore() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
- client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
- client().admin().indices().prepareFlush().get();
- ShardRouting routing = shard.routingEntry();
- test.removeShard(0, "b/c simon says so");
- routing = ShardRoutingHelper.reinit(routing);
- IndexShard newShard = test.createShard(routing);
- newShard.updateRoutingEntry(routing);
+ final IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "test", "0");
+ flushShard(shard);
+
+ final IndexShard newShard = reinitShard(shard);
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
Store.MetadataSnapshot snapshot = newShard.snapshotStoreMetadata();
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,
- localNode));
+ newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
snapshot = newShard.snapshotStoreMetadata();
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
@@ -537,7 +438,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
snapshot = newShard.snapshotStoreMetadata();
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
- newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted());
+ newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
snapshot = newShard.snapshotStoreMetadata();
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
@@ -546,203 +447,70 @@ public class IndexShardTests extends ESSingleNodeTestCase {
snapshot = newShard.snapshotStoreMetadata();
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
- }
-
- public void testDurableFlagHasEffect() {
- createIndex("test");
- ensureGreen();
- client().prepareIndex("test", "bar", "1").setSource("{}").get();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = test.getShardOrNull(0);
- setDurability(shard, Translog.Durability.REQUEST);
- assertFalse(shard.getEngine().getTranslog().syncNeeded());
- setDurability(shard, Translog.Durability.ASYNC);
- client().prepareIndex("test", "bar", "2").setSource("{}").get();
- assertTrue(shard.getEngine().getTranslog().syncNeeded());
- setDurability(shard, Translog.Durability.REQUEST);
- client().prepareDelete("test", "bar", "1").get();
- assertFalse(shard.getEngine().getTranslog().syncNeeded());
-
- setDurability(shard, Translog.Durability.ASYNC);
- client().prepareDelete("test", "bar", "2").get();
- assertTrue(shard.getEngine().getTranslog().syncNeeded());
- setDurability(shard, Translog.Durability.REQUEST);
- assertNoFailures(client().prepareBulk()
- .add(client().prepareIndex("test", "bar", "3").setSource("{}"))
- .add(client().prepareDelete("test", "bar", "1")).get());
- assertFalse(shard.getEngine().getTranslog().syncNeeded());
-
- setDurability(shard, Translog.Durability.ASYNC);
- assertNoFailures(client().prepareBulk()
- .add(client().prepareIndex("test", "bar", "4").setSource("{}"))
- .add(client().prepareDelete("test", "bar", "3")).get());
- setDurability(shard, Translog.Durability.REQUEST);
- assertTrue(shard.getEngine().getTranslog().syncNeeded());
- }
- private void setDurability(IndexShard shard, Translog.Durability durability) {
- client().admin().indices().prepareUpdateSettings(shard.shardId.getIndexName()).setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build()).get();
- assertEquals(durability, shard.getTranslogDurability());
+ closeShards(newShard);
}
- public void testMinimumCompatVersion() {
- Version versionCreated = VersionUtils.randomVersion(random());
- assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, SETTING_VERSION_CREATED, versionCreated.id));
- client().prepareIndex("test", "test").setSource("{}").get();
- ensureGreen("test");
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexShard test = indicesService.indexService(resolveIndex("test")).getShardOrNull(0);
- assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
- client().prepareIndex("test", "test").setSource("{}").get();
- assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
- test.getEngine().flush();
- assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion());
- }
-
- public void testUpdatePriority() {
- assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(IndexMetaData.SETTING_PRIORITY, 200));
- IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
- assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
- client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get();
- assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
- }
-
- public void testRecoverIntoLeftover() throws IOException {
- createIndex("test");
- ensureGreen("test");
- client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
- client().admin().indices().prepareFlush("test").get();
- SearchResponse response = client().prepareSearch("test").get();
- assertHitCount(response, 1L);
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = test.getShardOrNull(0);
- ShardPath shardPath = shard.shardPath();
- Path dataPath = shardPath.getDataPath();
- client().admin().indices().prepareClose("test").get();
- Path tempDir = createTempDir();
- Files.move(dataPath, tempDir.resolve("test"));
- client().admin().indices().prepareDelete("test").get();
- Files.createDirectories(dataPath.getParent());
- Files.move(tempDir.resolve("test"), dataPath);
- createIndex("test");
- ensureGreen("test");
- response = client().prepareSearch("test").get();
- assertHitCount(response, 0L);
- }
+ public void testAsyncFsync() throws InterruptedException, IOException {
+ IndexShard shard = newStartedShard();
+ Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
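+ // every sync callback releases a permit; draining all permits at the end proves each callback fired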
+ Thread[] thread = new Thread[randomIntBetween(3, 5)];
+ CountDownLatch latch = new CountDownLatch(thread.length);
+ for (int i = 0; i < thread.length; i++) {
+ thread[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.countDown();
+ latch.await();
+ for (int i = 0; i < 10000; i++) {
+ semaphore.acquire();
+ shard.sync(TranslogTests.randomTranslogLocation(), (ex) -> semaphore.release());
+ }
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
- public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
- Environment env = getInstanceFromNode(Environment.class);
- Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
- logger.info("--> idxPath: [{}]", idxPath);
- Settings idxSettings = Settings.builder()
- .put(IndexMetaData.SETTING_DATA_PATH, idxPath)
- .build();
- createIndex("test", idxSettings);
- ensureGreen("test");
- client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
- SearchResponse response = client().prepareSearch("test").get();
- assertHitCount(response, 1L);
- client().admin().indices().prepareDelete("test").get();
- assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
- assertPathHasBeenCleared(idxPath);
- }
+ ;
+ };
+ thread[i].start();
+ }
- public void testExpectedShardSizeIsPresent() throws InterruptedException {
- assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
- for (int i = 0; i < 50; i++) {
- client().prepareIndex("test", "test").setSource("{}").get();
+ for (int i = 0; i < thread.length; i++) {
+ thread[i].join();
}
- ensureGreen("test");
- InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
- clusterInfoService.refresh();
- ClusterState state = getInstanceFromNode(ClusterService.class).state();
- Long test = clusterInfoService.getClusterInfo().getShardSize(state.getRoutingTable().index("test").getShards().get(0).primaryShard());
- assertNotNull(test);
- assertTrue(test > 0);
+ assertTrue(semaphore.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS));
+
+ closeShards(shard);
}
- public void testIndexCanChangeCustomDataPath() throws Exception {
- Environment env = getInstanceFromNode(Environment.class);
- Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
- final String INDEX = "idx";
- Path startDir = idxPath.resolve("start-" + randomAsciiOfLength(10));
- Path endDir = idxPath.resolve("end-" + randomAsciiOfLength(10));
- logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString());
- logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString());
- // temp dirs are automatically created, but the end dir is what
- // startDir is going to be renamed as, so it needs to be deleted
- // otherwise we get all sorts of errors about the directory
- // already existing
- IOUtils.rm(endDir);
-
- Settings sb = Settings.builder()
- .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString())
- .build();
- Settings sb2 = Settings.builder()
- .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString())
+ public void testMinimumCompatVersion() throws IOException {
+ Version versionCreated = VersionUtils.randomVersion(random());
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, versionCreated.id)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build();
+ IndexMetaData metaData = IndexMetaData.builder("test")
+ .settings(settings)
+ .primaryTerm(0, 1).build();
+ IndexShard test = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
+ recoveryShardFromStore(test);
- logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString());
- createIndex(INDEX, sb);
- ensureGreen(INDEX);
- client().prepareIndex(INDEX, "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
-
- SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
- assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
-
- logger.info("--> closing the index [{}]", INDEX);
- client().admin().indices().prepareClose(INDEX).get();
- logger.info("--> index closed, re-opening...");
- client().admin().indices().prepareOpen(INDEX).get();
- logger.info("--> index re-opened");
- ensureGreen(INDEX);
-
- resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
- assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
-
- // Now, try closing and changing the settings
-
- logger.info("--> closing the index [{}]", INDEX);
- client().admin().indices().prepareClose(INDEX).get();
-
- logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName());
- assert Files.exists(endDir) == false : "end directory should not exist!";
- Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING);
-
- logger.info("--> updating settings...");
- client().admin().indices().prepareUpdateSettings(INDEX)
- .setSettings(sb2)
- .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
- .get();
-
- assert Files.exists(startDir) == false : "start dir shouldn't exist";
-
- logger.info("--> settings updated and files moved, re-opening index");
- client().admin().indices().prepareOpen(INDEX).get();
- logger.info("--> index re-opened");
- ensureGreen(INDEX);
-
- resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
- assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
+ indexDoc(test, "test", "test");
+ assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
+ indexDoc(test, "test", "test");
+ assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
+ test.getEngine().flush();
+ assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion());
- assertAcked(client().admin().indices().prepareDelete(INDEX));
- assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
- assertPathHasBeenCleared(startDir.toAbsolutePath());
- assertPathHasBeenCleared(endDir.toAbsolutePath());
+ closeShards(test);
}
public void testShardStats() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = test.getShardOrNull(0);
- ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats());
+ IndexShard shard = newStartedShard();
+ ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(),
+ new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), shard.commitStats());
assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath());
assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath());
assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath());
@@ -753,7 +521,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
StreamInput in = out.bytes().streamInput();
stats = ShardStats.readShardStats(in);
}
- XContentBuilder builder = XContentFactory.jsonBuilder();
+ XContentBuilder builder = jsonBuilder();
builder.startObject();
stats.toXContent(builder, EMPTY_PARAMS);
builder.endObject();
@@ -763,11 +531,16 @@ public class IndexShardTests extends ESSingleNodeTestCase {
expectedSubSequence.append("\",\"data_path\":\"");
expectedSubSequence.append(shard.shardPath().getRootDataPath().toString());
expectedSubSequence.append("\",\"is_custom_data_path\":").append(shard.shardPath().isCustomDataPath()).append("}");
- assumeFalse("Some path weirdness on windows", Constants.WINDOWS);
- assertTrue(xContent.contains(expectedSubSequence));
+ if (Constants.WINDOWS) {
+ // Some path weirdness on windows
+ } else {
+ assertTrue(xContent.contains(expectedSubSequence));
+ }
+ closeShards(shard);
}
- private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl, ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
+ private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl,
+ ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 0);
document.add(uidField);
@@ -776,12 +549,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
public void testIndexingOperationsListeners() throws IOException {
- createIndex("test_iol");
- ensureGreen();
- client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test_iol"));
- IndexShard shard = test.getShardOrNull(0);
+ IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
AtomicInteger preIndex = new AtomicInteger();
AtomicInteger postIndexCreate = new AtomicInteger();
AtomicInteger postIndexUpdate = new AtomicInteger();
@@ -790,7 +559,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
AtomicInteger postDelete = new AtomicInteger();
AtomicInteger postDeleteException = new AtomicInteger();
shard.close("simon says", true);
- shard = reinitWithWrapper(test, shard, null, new IndexingOperationListener() {
+ shard = reinitShard(shard, new IndexingOperationListener() {
@Override
public Engine.Index preIndex(Engine.Index operation) {
preIndex.incrementAndGet();
@@ -799,7 +568,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
@Override
public void postIndex(Engine.Index index, boolean created) {
- if(created) {
+ if (created) {
postIndexCreate.incrementAndGet();
} else {
postIndexUpdate.incrementAndGet();
@@ -828,8 +597,10 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
});
+ recoveryShardFromStore(shard);
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null);
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(),
+ new BytesArray(new byte[]{1}), null);
Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
shard.index(index);
assertEquals(1, preIndex.get());
@@ -891,95 +662,13 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertEquals(1, preDelete.get());
assertEquals(1, postDelete.get());
assertEquals(0, postDeleteException.get());
- }
- public void testMaybeFlush() throws Exception {
- createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build());
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = test.getShardOrNull(0);
- assertFalse(shard.shouldFlush());
- client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
- client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
- assertFalse(shard.shouldFlush());
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null);
- Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
- shard.index(index);
- assertTrue(shard.shouldFlush());
- assertEquals(2, shard.getEngine().getTranslog().totalOperations());
- client().prepareIndex("test", "test", "2").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
- assertBusy(() -> { // this is async
- assertFalse(shard.shouldFlush());
- });
- assertEquals(0, shard.getEngine().getTranslog().totalOperations());
- shard.getEngine().getTranslog().sync();
- long size = shard.getEngine().getTranslog().sizeInBytes();
- logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
- client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
- .build()).get();
- client().prepareDelete("test", "test", "2").get();
- logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
- assertBusy(() -> { // this is async
- logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
- assertFalse(shard.shouldFlush());
- });
- assertEquals(0, shard.getEngine().getTranslog().totalOperations());
- }
-
- public void testStressMaybeFlush() throws Exception {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
- assertFalse(shard.shouldFlush());
- client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
- client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
- assertFalse(shard.shouldFlush());
- final AtomicBoolean running = new AtomicBoolean(true);
- final int numThreads = randomIntBetween(2, 4);
- Thread[] threads = new Thread[numThreads];
- CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
- for (int i = 0; i < threads.length; i++) {
- threads[i] = new Thread() {
- @Override
- public void run() {
- try {
- barrier.await();
- } catch (InterruptedException | BrokenBarrierException e) {
- throw new RuntimeException(e);
- }
- while (running.get()) {
- shard.maybeFlush();
- }
- }
- };
- threads[i].start();
- }
- barrier.await();
- FlushStats flushStats = shard.flushStats();
- long total = flushStats.getTotal();
- client().prepareIndex("test", "test", "1").setSource("{}").get();
- assertBusy(() -> {
- assertEquals(total + 1, shard.flushStats().getTotal());
- });
- running.set(false);
- for (int i = 0; i < threads.length; i++) {
- threads[i].join();
- }
- assertEquals(total + 1, shard.flushStats().getTotal());
+ closeShards(shard);
}
public void testLockingBeforeAndAfterRelocated() throws Exception {
- assertAcked(client().admin().indices().prepareCreate("test").setSettings(
- Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)
- ).get());
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
- assertBusy(() -> assertThat(shard.state(), equalTo(IndexShardState.STARTED)));
+ final IndexShard shard = newStartedShard(true);
+ shard.updateRoutingEntry(ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"));
CountDownLatch latch = new CountDownLatch(1);
Thread recoveryThread = new Thread(() -> {
latch.countDown();
@@ -1004,17 +693,13 @@ public class IndexShardTests extends ESSingleNodeTestCase {
// lock can again be acquired
assertThat(shard.state(), equalTo(IndexShardState.RELOCATED));
}
+
+ closeShards(shard);
}
public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception {
- assertAcked(client().admin().indices().prepareCreate("test").setSettings(
- Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)
- ).get());
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
- assertBusy(() -> assertThat(shard.state(), equalTo(IndexShardState.STARTED)));
+ final IndexShard shard = newStartedShard(true);
+ shard.updateRoutingEntry(ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"));
Thread recoveryThread = new Thread(() -> {
try {
shard.relocated("simulated recovery");
@@ -1042,16 +727,13 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
recoveryThread.join();
+
+ closeShards(shard);
}
public void testStressRelocated() throws Exception {
- assertAcked(client().admin().indices().prepareCreate("test").setSettings(
- Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)
- ).get());
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
+ final IndexShard shard = newStartedShard(true);
+ shard.updateRoutingEntry(ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"));
final int numThreads = randomIntBetween(2, 4);
Thread[] indexThreads = new Thread[numThreads];
CountDownLatch allPrimaryOperationLocksAcquired = new CountDownLatch(numThreads);
@@ -1099,88 +781,139 @@ public class IndexShardTests extends ESSingleNodeTestCase {
for (Thread indexThread : indexThreads) {
indexThread.join();
}
+
+ closeShards(shard);
+ }
+
+ public void testRelocatedShardCanNotBeRevived() throws IOException, InterruptedException {
+ final IndexShard shard = newStartedShard(true);
+ final ShardRouting originalRouting = shard.routingEntry();
+ shard.updateRoutingEntry(ShardRoutingHelper.relocate(originalRouting, "other_node"));
+ shard.relocated("test");
+ expectThrows(IllegalIndexShardStateException.class, () -> shard.updateRoutingEntry(originalRouting));
+ closeShards(shard);
+ }
+
+ public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOException, InterruptedException {
+ final IndexShard shard = newStartedShard(true);
+ final ShardRouting originalRouting = shard.routingEntry();
+ shard.updateRoutingEntry(ShardRoutingHelper.relocate(originalRouting, "other_node"));
+ shard.updateRoutingEntry(originalRouting);
+ expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated("test"));
+ closeShards(shard);
+ }
+
+ public void testRelocatedShardCanNotBeRevivedConcurrently() throws IOException, InterruptedException, BrokenBarrierException {
+ final IndexShard shard = newStartedShard(true);
+ final ShardRouting originalRouting = shard.routingEntry();
+ shard.updateRoutingEntry(ShardRoutingHelper.relocate(originalRouting, "other_node"));
+ CyclicBarrier cyclicBarrier = new CyclicBarrier(3);
+ AtomicReference<Exception> relocationException = new AtomicReference<>();
+ Thread relocationThread = new Thread(new AbstractRunnable() {
+ @Override
+ public void onFailure(Exception e) {
+ relocationException.set(e);
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ cyclicBarrier.await();
+ shard.relocated("test");
+ }
+ });
+ relocationThread.start();
+ AtomicReference<Exception> cancellingException = new AtomicReference<>();
+ Thread cancellingThread = new Thread(new AbstractRunnable() {
+ @Override
+ public void onFailure(Exception e) {
+ cancellingException.set(e);
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ cyclicBarrier.await();
+ shard.updateRoutingEntry(originalRouting);
+ }
+ });
+ cancellingThread.start();
+ cyclicBarrier.await();
+ relocationThread.join();
+ cancellingThread.join();
+ if (shard.state() == IndexShardState.RELOCATED) {
+ logger.debug("shard was relocated successfully");
+ assertThat(cancellingException.get(), instanceOf(IllegalIndexShardStateException.class));
+ assertThat("current routing:" + shard.routingEntry(), shard.routingEntry().relocating(), equalTo(true));
+ assertThat(relocationException.get(), nullValue());
+ } else {
+ logger.debug("shard relocation was cancelled");
+ assertThat(relocationException.get(), instanceOf(IllegalIndexShardStateException.class));
+ assertThat("current routing:" + shard.routingEntry(), shard.routingEntry().relocating(), equalTo(false));
+ assertThat(cancellingException.get(), nullValue());
+
+ }
+ closeShards(shard);
}
public void testRecoverFromStore() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
+ final IndexShard shard = newStartedShard(true);
int translogOps = 1;
- client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
+ indexDoc(shard, "test", "0");
if (randomBoolean()) {
- client().admin().indices().prepareFlush().get();
+ flushShard(shard);
translogOps = 0;
}
- ShardRouting routing = shard.routingEntry();
- test.removeShard(0, "b/c simon says so");
- routing = ShardRoutingHelper.reinit(routing);
- IndexShard newShard = test.createShard(routing);
- newShard.updateRoutingEntry(routing);
+ IndexShard newShard = reinitShard(shard);
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode));
+ newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
assertTrue(newShard.recoverFromStore());
assertEquals(translogOps, newShard.recoveryState().getTranslog().recoveredOperations());
assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperations());
assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperationsOnStart());
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
- newShard.updateRoutingEntry(routing.moveToStarted());
- SearchResponse response = client().prepareSearch().get();
- assertHitCount(response, 1);
+ newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
+ assertDocCount(newShard, 1);
+ closeShards(newShard);
}
public void testRecoverFromCleanStore() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
- client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
+ final IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "test", "0");
if (randomBoolean()) {
- client().admin().indices().prepareFlush().get();
+ flushShard(shard);
}
- ShardRouting routing = shard.routingEntry();
- test.removeShard(0, "b/c simon says so");
- routing = ShardRoutingHelper.reinit(routing, UnassignedInfo.Reason.INDEX_CREATED);
- IndexShard newShard = test.createShard(routing);
- newShard.updateRoutingEntry(routing);
+ final ShardRouting shardRouting = shard.routingEntry();
+ IndexShard newShard = reinitShard(shard,
+ ShardRoutingHelper.initWithSameId(shardRouting, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE)
+ );
+
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,
- localNode));
+ newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
assertTrue(newShard.recoverFromStore());
assertEquals(0, newShard.recoveryState().getTranslog().recoveredOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperationsOnStart());
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
- newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted());
- SearchResponse response = client().prepareSearch().get();
- assertHitCount(response, 0);
+ newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
+ assertDocCount(newShard, 0);
+ closeShards(newShard);
}
public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
-
- client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
+ final IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "test", "0");
if (randomBoolean()) {
- client().admin().indices().prepareFlush().get();
+ flushShard(shard);
}
- final ShardRouting origRouting = shard.routingEntry();
- ShardRouting routing = origRouting;
+
Store store = shard.store();
store.incRef();
- test.removeShard(0, "b/c simon says so");
+ closeShards(shard);
cleanLuceneIndex(store.directory());
store.decRef();
- routing = ShardRoutingHelper.reinit(routing);
- IndexShard newShard = test.createShard(routing);
- newShard.updateRoutingEntry(routing);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode));
+ IndexShard newShard = reinitShard(shard);
+ DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
+ ShardRouting routing = newShard.routingEntry();
+ newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null));
try {
newShard.recoverFromStore();
fail("index not there!");
@@ -1189,38 +922,33 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
routing = ShardRoutingHelper.moveToUnassigned(routing, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so"));
- routing = ShardRoutingHelper.initialize(routing, origRouting.currentNodeId());
+ routing = ShardRoutingHelper.initialize(routing, newShard.routingEntry().currentNodeId());
assertTrue("it's already recovering, we should ignore new ones", newShard.ignoreRecoveryAttempt());
try {
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode));
+ newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null));
fail("we are already recovering, can't mark again");
} catch (IllegalIndexShardStateException e) {
// OK!
}
- test.removeShard(0, "I broken it");
- newShard = test.createShard(routing);
- newShard.updateRoutingEntry(routing);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode));
+
+ newShard = reinitShard(newShard,
+ ShardRoutingHelper.initWithSameId(routing, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE));
+ newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
assertTrue("recover even if there is nothing to recover", newShard.recoverFromStore());
- newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted());
- SearchResponse response = client().prepareSearch().get();
- assertHitCount(response, 0);
+ newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
+ assertDocCount(newShard, 0);
// we can't issue this request through a client because of the inconsistencies we created with the cluster state
// doing it directly instead
- IndexRequest request = client().prepareIndex("test", "test", "0").setSource("{}").request();
- request.process(null, false, "test");
- TransportIndexAction.executeIndexRequestOnPrimary(request, newShard, null);
+ indexDoc(newShard, "test", "0");
newShard.refresh("test");
- assertHitCount(client().prepareSearch().get(), 1);
+ assertDocCount(newShard, 1);
+
+ closeShards(newShard);
}
public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedException, IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- final IndexShard shard = test.getShardOrNull(0);
+ final IndexShard shard = newStartedShard(true);
ShardRouting origRouting = shard.routingEntry();
assertThat(shard.state(), equalTo(IndexShardState.STARTED));
ShardRouting inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node");
@@ -1232,37 +960,37 @@ public class IndexShardTests extends ESSingleNodeTestCase {
fail("Expected IndexShardRelocatedException");
} catch (IndexShardRelocatedException expected) {
}
+
+ closeShards(shard);
}
public void testRestoreShard() throws IOException {
- createIndex("test");
- createIndex("test_target");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("test"));
- IndexService test_target = indicesService.indexService(resolveIndex("test_target"));
- final IndexShard test_shard = test.getShardOrNull(0);
-
- client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
- client().prepareIndex("test_target", "test", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
- assertHitCount(client().prepareSearch("test_target").get(), 1);
- assertSearchHits(client().prepareSearch("test_target").get(), "1");
- client().admin().indices().prepareFlush("test").get(); // only flush test
- final ShardRouting origRouting = test_target.getShardOrNull(0).routingEntry();
- ShardRouting routing = ShardRoutingHelper.reinit(origRouting);
+ final IndexShard source = newStartedShard(true);
+ IndexShard target = newStartedShard(true);
+
+ indexDoc(source, "test", "0");
+ if (randomBoolean()) {
+ source.refresh("test");
+ }
+ indexDoc(target, "test", "1");
+ target.refresh("test");
+ assertDocs(target, new Uid("test", "1"));
+ flushShard(source); // only flush source
+ final ShardRouting origRouting = target.routingEntry();
+ ShardRouting routing = ShardRoutingHelper.reinitPrimary(origRouting);
final Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID()));
- routing = ShardRoutingHelper.newWithRestoreSource(routing, new RestoreSource(snapshot, Version.CURRENT, "test"));
- test_target.removeShard(0, "just do it man!");
- final IndexShard test_target_shard = test_target.createShard(routing);
- Store sourceStore = test_shard.store();
- Store targetStore = test_target_shard.store();
+ routing = ShardRoutingHelper.newWithRestoreSource(routing,
+ new RecoverySource.SnapshotRecoverySource(snapshot, Version.CURRENT, "test"));
+ target = reinitShard(target, routing);
+ Store sourceStore = source.store();
+ Store targetStore = target.store();
- test_target_shard.updateRoutingEntry(routing);
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- test_target_shard.markAsRecovering("store", new RecoveryState(routing.shardId(), routing.primary(), RecoveryState.Type.SNAPSHOT, routing.restoreSource(), localNode));
- assertTrue(test_target_shard.restoreFromRepository(new RestoreOnlyRepository("test") {
+ target.markAsRecovering("store", new RecoveryState(routing, localNode, null));
+ assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") {
@Override
- public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
+ public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
+ RecoveryState recoveryState) {
try {
cleanLuceneIndex(targetStore.directory());
for (String file : sourceStore.directory().listAll()) {
@@ -1277,19 +1005,17 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
}));
- test_target_shard.updateRoutingEntry(routing.moveToStarted());
- assertHitCount(client().prepareSearch("test_target").get(), 1);
- assertSearchHits(client().prepareSearch("test_target").get(), "0");
+ target.updateRoutingEntry(routing.moveToStarted());
+ assertDocs(target, new Uid("test", "0"));
+
+ closeShards(source, target);
}
public void testSearcherWrapperIsUsed() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = indexService.getShardOrNull(0);
- client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
- client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
+ IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
+ indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}");
+ shard.refresh("test");
Engine.GetResult getResult = shard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1"))));
assertTrue(getResult.exists());
@@ -1312,35 +1038,28 @@ public class IndexShardTests extends ESSingleNodeTestCase {
return searcher;
}
};
- shard.close("simon says", true);
- IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper);
- try {
- try (Engine.Searcher searcher = newShard.acquireSearcher("test")) {
- TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10);
- assertEquals(search.totalHits, 0);
- search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10);
- assertEquals(search.totalHits, 1);
- }
- getResult = newShard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1"))));
- assertTrue(getResult.exists());
- assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader
- assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader);
- getResult.release();
- } finally {
- newShard.close("just do it", randomBoolean());
+ closeShards(shard);
+ IndexShard newShard = newShard(ShardRoutingHelper.reinitPrimary(shard.routingEntry()),
+ shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper);
+
+ recoveryShardFromStore(newShard);
+
+ try (Engine.Searcher searcher = newShard.acquireSearcher("test")) {
+ TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10);
+ assertEquals(search.totalHits, 0);
+ search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10);
+ assertEquals(search.totalHits, 1);
}
- }
+ getResult = newShard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1"))));
+ assertTrue(getResult.exists());
+ assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader
+ assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader);
+ getResult.release();
- public void testSearcherWrapperWorksWithGlobaOrdinals() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = indexService.getShardOrNull(0);
- client().admin().indices().preparePutMapping("test").setType("test").setSource("foo", "type=text,fielddata=true").get();
- client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
- client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
+ closeShards(newShard);
+ }
+ public void testSearcherWrapperWorksWithGlobalOrdinals() throws IOException {
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {
@Override
public DirectoryReader wrap(DirectoryReader reader) throws IOException {
@@ -1353,46 +1072,52 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
};
- shard.close("simon says", true);
- IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper);
- try {
- // test global ordinals are evicted
- MappedFieldType foo = newShard.mapperService().fullName("foo");
- IndexFieldData.Global ifd = shard.indexFieldDataService().getForField(foo);
- FieldDataStats before = shard.fieldData().stats("foo");
- assertThat(before.getMemorySizeInBytes(), equalTo(0L));
- FieldDataStats after = null;
- try (Engine.Searcher searcher = newShard.acquireSearcher("test")) {
- assumeTrue("we have to have more than one segment", searcher.getDirectoryReader().leaves().size() > 1);
- ifd.loadGlobal(searcher.getDirectoryReader());
- after = shard.fieldData().stats("foo");
- assertEquals(after.getEvictions(), before.getEvictions());
- // If a field doesn't exist an empty IndexFieldData is returned and that isn't cached:
- assertThat(after.getMemorySizeInBytes(), equalTo(0L));
- }
- assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
- assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), after.getMemorySizeInBytes());
- newShard.flush(new FlushRequest().force(true).waitIfOngoing(true));
- newShard.refresh("test");
- assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), before.getMemorySizeInBytes());
- assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
- } finally {
- newShard.close("just do it", randomBoolean());
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .build();
+ IndexMetaData metaData = IndexMetaData.builder("test")
+ .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\", \"fielddata\": true }}}")
+ .settings(settings)
+ .primaryTerm(0, 1).build();
+ IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, wrapper);
+ recoveryShardFromStore(shard);
+ indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
+ shard.refresh("created segment 1");
+ indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}");
+ shard.refresh("created segment 2");
+
+ // test global ordinals are evicted
+ MappedFieldType foo = shard.mapperService().fullName("foo");
+ IndexFieldData.Global ifd = shard.indexFieldDataService().getForField(foo);
+ FieldDataStats before = shard.fieldData().stats("foo");
+ assertThat(before.getMemorySizeInBytes(), equalTo(0L));
+ FieldDataStats after = null;
+ try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
+ assertThat("we have to have more than one segment", searcher.getDirectoryReader().leaves().size(), greaterThan(1));
+ ifd.loadGlobal(searcher.getDirectoryReader());
+ after = shard.fieldData().stats("foo");
+ assertEquals(after.getEvictions(), before.getEvictions());
+ // If a field doesn't exist an empty IndexFieldData is returned and that isn't cached:
+ assertThat(after.getMemorySizeInBytes(), equalTo(0L));
}
+ assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
+ assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), after.getMemorySizeInBytes());
+ shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
+ shard.refresh("test");
+ assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), before.getMemorySizeInBytes());
+ assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
+
+ closeShards(shard);
}
- public void testIndexingOperationListnenersIsInvokedOnRecovery() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = indexService.getShardOrNull(0);
- client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get();
- client().prepareDelete("test", "test", "0").get();
- client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
-
- IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {};
- shard.close("simon says", false);
+ public void testIndexingOperationListenersIsInvokedOnRecovery() throws IOException {
+ IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
+ deleteDoc(shard, "test", "0");
+ indexDoc(shard, "test", "1", "{\"foo\" : \"bar\"}");
+ shard.refresh("test");
+
final AtomicInteger preIndex = new AtomicInteger();
final AtomicInteger postIndex = new AtomicInteger();
final AtomicInteger preDelete = new AtomicInteger();
@@ -1421,85 +1146,27 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
};
- final IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper, listener);
- try {
- IndexingStats indexingStats = newShard.indexingStats();
- // ensure we are not influencing the indexing stats
- assertEquals(0, indexingStats.getTotal().getDeleteCount());
- assertEquals(0, indexingStats.getTotal().getDeleteCurrent());
- assertEquals(0, indexingStats.getTotal().getIndexCount());
- assertEquals(0, indexingStats.getTotal().getIndexCurrent());
- assertEquals(0, indexingStats.getTotal().getIndexFailedCount());
- assertEquals(2, preIndex.get());
- assertEquals(2, postIndex.get());
- assertEquals(1, preDelete.get());
- assertEquals(1, postDelete.get());
- } finally {
- newShard.close("just do it", randomBoolean());
- }
- }
-
- public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = indexService.getShardOrNull(0);
- client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get();
- client().prepareDelete("test", "test", "0").get();
- client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
-
- IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {};
- shard.close("simon says", false);
- AtomicReference<IndexShard> shardRef = new AtomicReference<>();
- List<Exception> failures = new ArrayList<>();
- IndexingOperationListener listener = new IndexingOperationListener() {
-
- @Override
- public void postIndex(Engine.Index index, boolean created) {
- try {
- assertNotNull(shardRef.get());
- // this is all IMC needs to do - check current memory and refresh
- assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
- shardRef.get().refresh("test");
- } catch (Exception e) {
- failures.add(e);
- throw e;
- }
- }
-
-
- @Override
- public void postDelete(Engine.Delete delete) {
- try {
- assertNotNull(shardRef.get());
- // this is all IMC needs to do - check current memory and refresh
- assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
- shardRef.get().refresh("test");
- } catch (Exception e) {
- failures.add(e);
- throw e;
- }
- }
- };
- final IndexShard newShard = newIndexShard(indexService, shard, wrapper, listener);
- shardRef.set(newShard);
- recoverShard(newShard);
+ final IndexShard newShard = reinitShard(shard, listener);
+ recoveryShardFromStore(newShard);
+ IndexingStats indexingStats = newShard.indexingStats();
+ // ensure we are not influencing the indexing stats
+ assertEquals(0, indexingStats.getTotal().getDeleteCount());
+ assertEquals(0, indexingStats.getTotal().getDeleteCurrent());
+ assertEquals(0, indexingStats.getTotal().getIndexCount());
+ assertEquals(0, indexingStats.getTotal().getIndexCurrent());
+ assertEquals(0, indexingStats.getTotal().getIndexFailedCount());
+ assertEquals(2, preIndex.get());
+ assertEquals(2, postIndex.get());
+ assertEquals(1, preDelete.get());
+ assertEquals(1, postDelete.get());
- try {
- ExceptionsHelper.rethrowAndSuppress(failures);
- } finally {
- newShard.close("just do it", randomBoolean());
- }
+ closeShards(newShard);
}
public void testSearchIsReleaseIfWrapperFails() throws IOException {
- createIndex("test");
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService indexService = indicesService.indexService(resolveIndex("test"));
- IndexShard shard = indexService.getShardOrNull(0);
- client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
+ IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
+ shard.refresh("test");
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {
@Override
public DirectoryReader wrap(DirectoryReader reader) throws IOException {
@@ -1511,180 +1178,146 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
};
- shard.close("simon says", true);
- IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper);
+ closeShards(shard);
+ IndexShard newShard = newShard(ShardRoutingHelper.reinitPrimary(shard.routingEntry()),
+ shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper);
+
+ recoveryShardFromStore(newShard);
+
try {
newShard.acquireSearcher("test");
fail("exception expected");
} catch (RuntimeException ex) {
//
- } finally {
- newShard.close("just do it", randomBoolean());
}
- }
-
- public static final IndexShard reinitWithWrapper(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, IndexingOperationListener... listeners) throws IOException {
- IndexShard newShard = newIndexShard(indexService, shard, wrapper, listeners);
- return recoverShard(newShard);
- }
-
- public static final IndexShard recoverShard(IndexShard newShard) throws IOException {
- DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), newShard.routingEntry().primary(), RecoveryState.Type.STORE, localNode, localNode));
- assertTrue(newShard.recoverFromStore());
- newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
- return newShard;
- }
-
- public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, IndexingOperationListener... listeners) throws IOException {
- ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
- IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(),
- shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(),
- indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
- indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners));
- return newShard;
- }
-
- private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) {
- ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(),
- existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING,
- existingShardRouting.allocationId());
- shardRouting = shardRouting.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"));
- return shardRouting;
+ closeShards(newShard);
}
public void testTranslogRecoverySyncsTranslog() throws IOException {
- createIndex("testindexfortranslogsync");
- client().admin().indices().preparePutMapping("testindexfortranslogsync").setType("testtype").setSource(jsonBuilder().startObject()
- .startObject("testtype")
- .startObject("properties")
- .startObject("foo")
- .field("type", "text")
- .endObject()
- .endObject().endObject().endObject()).get();
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("testindexfortranslogsync"));
- IndexShard shard = test.getShardOrNull(0);
- ShardRouting routing = getInitializingShardRouting(shard.routingEntry());
- test.removeShard(0, "b/c britta says so");
- IndexShard newShard = test.createShard(routing);
- DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode));
- List<Translog.Operation> operations = new ArrayList<>();
- operations.add(new Translog.Index("testtype", "1", BytesReference.toBytes(jsonBuilder().startObject().field("foo", "bar").endObject().bytes())));
- newShard.prepareForIndexRecovery();
- newShard.recoveryState().getTranslog().totalOperations(operations.size());
- newShard.skipTranslogRecovery();
- newShard.performBatchRecovery(operations);
- assertFalse(newShard.getTranslog().syncNeeded());
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .build();
+ IndexMetaData metaData = IndexMetaData.builder("test")
+ .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
+ .settings(settings)
+ .primaryTerm(0, 1).build();
+ IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
+ recoveryShardFromStore(primary);
+
+ indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+ IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null);
+ recoverReplica(replica, primary, (shard, discoveryNode) ->
+ new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> {
+ }) {
+ @Override
+ public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
+ super.indexTranslogOperations(operations, totalTranslogOps);
+ assertFalse(replica.getTranslog().syncNeeded());
+ }
+ }, true);
+
+ closeShards(primary, replica);
}
- public void testIndexingBufferDuringInternalRecovery() throws IOException {
- createIndex("index");
- client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject()
- .startObject("testtype")
- .startObject("properties")
- .startObject("foo")
- .field("type", "text")
- .endObject()
- .endObject().endObject().endObject()).get();
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("index"));
- IndexShard shard = test.getShardOrNull(0);
- ShardRouting routing = getInitializingShardRouting(shard.routingEntry());
- test.removeShard(0, "b/c britta says so");
- IndexShard newShard = test.createShard(routing);
- newShard.shardRouting = routing;
+ public void testShardActiveDuringInternalRecovery() throws IOException {
+ IndexShard shard = newStartedShard(true);
+ indexDoc(shard, "type", "0");
+ shard = reinitShard(shard);
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode));
+ shard.markAsRecovering("for testing", new RecoveryState(shard.routingEntry(), localNode, null));
// Shard is still inactive since we haven't started recovering yet
- assertFalse(newShard.isActive());
- newShard.prepareForIndexRecovery();
+ assertFalse(shard.isActive());
+ shard.prepareForIndexRecovery();
// Shard is still inactive since we haven't started recovering yet
- assertFalse(newShard.isActive());
- newShard.performTranslogRecovery(true);
+ assertFalse(shard.isActive());
+ shard.performTranslogRecovery(true);
// Shard should now be active since we did recover:
- assertTrue(newShard.isActive());
+ assertTrue(shard.isActive());
+ closeShards(shard);
}
- public void testIndexingBufferDuringPeerRecovery() throws IOException {
- createIndex("index");
- client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject()
- .startObject("testtype")
- .startObject("properties")
- .startObject("foo")
- .field("type", "text")
- .endObject()
- .endObject().endObject().endObject()).get();
- ensureGreen();
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("index"));
- IndexShard shard = test.getShardOrNull(0);
- ShardRouting routing = getInitializingShardRouting(shard.routingEntry());
- test.removeShard(0, "b/c britta says so");
- IndexShard newShard = test.createShard(routing);
+ public void testShardActiveDuringPeerRecovery() throws IOException {
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .build();
+ IndexMetaData metaData = IndexMetaData.builder("test")
+ .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
+ .settings(settings)
+ .primaryTerm(0, 1).build();
+ IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
+ recoveryShardFromStore(primary);
+
+ indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+ IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null);
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode));
- // Shard is still inactive since we haven't started recovering yet
- assertFalse(newShard.isActive());
- List<Translog.Operation> operations = new ArrayList<>();
- operations.add(new Translog.Index("testtype", "1", BytesReference.toBytes(jsonBuilder().startObject().field("foo", "bar").endObject().bytes())));
- newShard.prepareForIndexRecovery();
- newShard.skipTranslogRecovery();
+ replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode));
// Shard is still inactive since we haven't started recovering yet
- assertFalse(newShard.isActive());
- newShard.performBatchRecovery(operations);
- // Shard should now be active since we did recover:
- assertTrue(newShard.isActive());
+ assertFalse(replica.isActive());
+ recoverReplica(replica, primary, (shard, discoveryNode) ->
+ new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> {
+ }) {
+ @Override
+ public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
+ super.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp);
+ // Shard is still inactive since we haven't started recovering yet
+ assertFalse(replica.isActive());
+
+ }
+
+ @Override
+ public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
+ super.indexTranslogOperations(operations, totalTranslogOps);
+ // Shard should now be active since we did recover:
+ assertTrue(replica.isActive());
+ }
+ }, false);
+
+ closeShards(primary, replica);
}
public void testRecoverFromLocalShard() throws IOException {
- createIndex("index");
- createIndex("index_1");
- createIndex("index_2");
- client().admin().indices().preparePutMapping("index").setType("test").setSource(jsonBuilder().startObject()
- .startObject("test")
- .startObject("properties")
- .startObject("foo")
- .field("type", "text")
- .endObject()
- .endObject().endObject().endObject()).get();
- client().prepareIndex("index", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
- client().prepareIndex("index", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
-
-
- IndicesService indicesService = getInstanceFromNode(IndicesService.class);
- IndexService test = indicesService.indexService(resolveIndex("index_1"));
- IndexShard shard = test.getShardOrNull(0);
- ShardRouting routing = ShardRoutingHelper.initWithSameId(shard.routingEntry());
- test.removeShard(0, "b/c simon says so");
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .build();
+ IndexMetaData metaData = IndexMetaData.builder("source")
+ .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
+ .settings(settings)
+ .primaryTerm(0, 1).build();
+
+ IndexShard sourceShard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
+ recoveryShardFromStore(sourceShard);
+
+ indexDoc(sourceShard, "test", "0", "{\"foo\" : \"bar\"}");
+ indexDoc(sourceShard, "test", "1", "{\"foo\" : \"bar\"}");
+ sourceShard.refresh("test");
+
+
+ ShardRouting targetRouting = TestShardRouting.newShardRouting(new ShardId("index_1", "index_1", 0), "n1", true,
+ ShardRoutingState.INITIALIZING, RecoverySource.LocalShardsRecoverySource.INSTANCE);
+
+ final IndexShard targetShard;
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
+ Map<String, MappingMetaData> requestedMappingUpdates = ConcurrentCollections.newConcurrentMap();
{
- final IndexShard newShard = test.createShard(routing);
- newShard.updateRoutingEntry(routing);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.LOCAL_SHARDS, localNode, localNode));
+ targetShard = newShard(targetRouting);
+ targetShard.markAsRecovering("store", new RecoveryState(targetShard.routingEntry(), localNode, null));
BiConsumer<String, MappingMetaData> mappingConsumer = (type, mapping) -> {
- try {
- client().admin().indices().preparePutMapping().setConcreteIndex(newShard.indexSettings().getIndex())
- .setType(type)
- .setSource(mapping.source().string())
- .get();
- } catch (IOException ex) {
- throw new ElasticsearchException("failed to stringify mapping source", ex);
- }
+ assertNull(requestedMappingUpdates.put(type, mapping));
};
+
+ final IndexShard differentIndex = newShard(new ShardId("index_2", "index_2", 0), true);
+ recoveryShardFromStore(differentIndex);
expectThrows(IllegalArgumentException.class, () -> {
- IndexService index = indicesService.indexService(resolveIndex("index"));
- IndexService index_2 = indicesService.indexService(resolveIndex("index_2"));
- newShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(index.getShard(0), index_2.getShard(0)));
+ targetShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(sourceShard, differentIndex));
});
+ closeShards(differentIndex);
- IndexService indexService = indicesService.indexService(resolveIndex("index"));
- assertTrue(newShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(indexService.getShard(0))));
- RecoveryState recoveryState = newShard.recoveryState();
+ assertTrue(targetShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(sourceShard)));
+ RecoveryState recoveryState = targetShard.recoveryState();
assertEquals(RecoveryState.Stage.DONE, recoveryState.getStage());
assertTrue(recoveryState.getIndex().fileDetails().size() > 0);
for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) {
@@ -1694,95 +1327,114 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertEquals(file.recovered(), file.length());
}
}
- routing = ShardRoutingHelper.moveToStarted(routing);
- newShard.updateRoutingEntry(routing);
- assertHitCount(client().prepareSearch("index_1").get(), 2);
+ targetShard.updateRoutingEntry(ShardRoutingHelper.moveToStarted(targetShard.routingEntry()));
+ assertDocCount(targetShard, 2);
}
        // now check that it's persistent, i.e. that the added shards are committed
{
- routing = shard.routingEntry();
- test.removeShard(0, "b/c simon says so");
- routing = ShardRoutingHelper.reinit(routing);
- final IndexShard newShard = test.createShard(routing);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.LOCAL_SHARDS, localNode, localNode));
- assertTrue(newShard.recoverFromStore());
- routing = ShardRoutingHelper.moveToStarted(routing);
- newShard.updateRoutingEntry(routing);
- assertHitCount(client().prepareSearch("index_1").get(), 2);
+ final IndexShard newShard = reinitShard(targetShard);
+ recoveryShardFromStore(newShard);
+ assertDocCount(newShard, 2);
+ closeShards(newShard);
}
- GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings("index_1").get();
- ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = mappingsResponse.getMappings();
- assertNotNull(mappings.get("index_1"));
- assertNotNull(mappings.get("index_1").get("test"));
- assertEquals(mappings.get("index_1").get("test").get().source().string(), "{\"test\":{\"properties\":{\"foo\":{\"type\":\"text\"}}}}");
+ assertThat(requestedMappingUpdates, hasKey("test"));
+ assertThat(requestedMappingUpdates.get("test").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}"));
+ closeShards(sourceShard, targetShard);
}
/** A dummy repository for testing which just needs restore overridden */
private abstract static class RestoreOnlyRepository extends AbstractLifecycleComponent implements Repository {
private final String indexName;
+
public RestoreOnlyRepository(String indexName) {
super(Settings.EMPTY);
this.indexName = indexName;
}
+
@Override
- protected void doStart() {}
+ protected void doStart() {
+ }
+
@Override
- protected void doStop() {}
+ protected void doStop() {
+ }
+
@Override
- protected void doClose() {}
+ protected void doClose() {
+ }
+
@Override
public RepositoryMetaData getMetadata() {
return null;
}
+
@Override
public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) {
return null;
}
+
@Override
public MetaData getSnapshotMetaData(SnapshotInfo snapshot, List<IndexId> indices) throws IOException {
return null;
}
+
@Override
public RepositoryData getRepositoryData() {
Map<IndexId, Set<SnapshotId>> map = new HashMap<>();
- map.put(new IndexId(indexName, "blah"), Collections.emptySet());
+ map.put(new IndexId(indexName, "blah"), emptySet());
return new RepositoryData(Collections.emptyList(), map);
}
+
@Override
- public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) {}
+ public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) {
+ }
+
@Override
public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long startTime, String failure, int totalShards, List<SnapshotShardFailure> shardFailures) {
return null;
}
+
@Override
- public void deleteSnapshot(SnapshotId snapshotId) {}
+ public void deleteSnapshot(SnapshotId snapshotId) {
+ }
+
@Override
public long getSnapshotThrottleTimeInNanos() {
return 0;
}
+
@Override
public long getRestoreThrottleTimeInNanos() {
return 0;
}
+
@Override
public String startVerification() {
return null;
}
+
@Override
- public void endVerification(String verificationToken) {}
+ public void endVerification(String verificationToken) {
+ }
+
@Override
public boolean isReadOnly() {
return false;
}
+
@Override
- public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {}
+ public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
+ }
+
@Override
public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) {
return null;
}
+
@Override
- public void verify(String verificationToken, DiscoveryNode localNode) {}
+ public void verify(String verificationToken, DiscoveryNode localNode) {
+ }
}
}
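
The IndexShardTests hunks above all follow one refactor: node-level fixtures (createIndex/ensureGreen plus IndicesService lookups) are replaced by direct shard-level helpers. A minimal sketch of the resulting test shape, assuming a base test class (not named in this diff) that supplies newStartedShard, indexDoc, flushShard, reinitShard, recoveryShardFromStore, assertDocCount and closeShards with the signatures used in the hunks above:

    import java.io.IOException;
    import org.elasticsearch.index.shard.IndexShard;

    public class ExampleShardTest extends IndexShardTestCase { // assumed base class
        public void testIndexFlushAndRecover() throws IOException {
            IndexShard shard = newStartedShard(true);            // standalone primary, no cluster needed
            indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); // index straight into the shard
            flushShard(shard);                                   // persist the operation to Lucene
            IndexShard newShard = reinitShard(shard);            // fresh IndexShard over the same shard path
            recoveryShardFromStore(newShard);                    // recover from store and mark started
            assertDocCount(newShard, 1);
            closeShards(newShard);                               // tests now release shard resources themselves
        }
    }

Note the closing discipline visible throughout the diff: with no IndexService left to reap shards, every test ends with closeShards(...) for each shard it created.
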
diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
index 20fd02b516..05147d4a72 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
@@ -29,6 +29,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -36,7 +37,6 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.codec.CodecService;
@@ -66,7 +66,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
-import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
@@ -125,8 +124,9 @@ public class RefreshListenersTests extends ESTestCase {
store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(),
iwc.getSimilarity(), new CodecService(null, logger), eventListener, new TranslogHandler(shardId.getIndexName(), logger),
IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
- TimeValue.timeValueMinutes(5), listeners);
+ TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
engine = new InternalEngine(config);
+ listeners.setTranslog(engine.getTranslog());
}
@After
diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java
index e960622d1c..34c1789824 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java
@@ -28,6 +28,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
@@ -64,4 +65,7 @@ public class ShardUtilsTests extends ESTestCase {
IOUtils.close(writer, dir);
}
+ public static Engine getShardEngine(IndexShard shard) {
+ return shard.getEngine();
+ }
}
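
The new ShardUtilsTests.getShardEngine accessor lets tests outside the org.elasticsearch.index.shard package reach a shard's Engine without widening IndexShard's own visibility. A hypothetical caller, assuming a started IndexShard named shard is already in scope inside a test method:

    import org.elasticsearch.index.engine.Engine;
    import org.elasticsearch.index.shard.ShardUtilsTests;

    Engine engine = ShardUtilsTests.getShardEngine(shard);
    // e.g. inspect translog state directly, as the recovery tests above do
    assertFalse(engine.getTranslog().syncNeeded());
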
diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
index 6facd70acf..a996c9f4bd 100644
--- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
+++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
@@ -61,7 +61,7 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
@@ -159,7 +159,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
indexRandom(true, builders);
ensureGreen();
- assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
// we have to flush at least once here since we don't corrupt the translog
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
@@ -176,7 +176,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
ClusterHealthResponse health = client().admin().cluster()
.health(Requests.clusterHealthRequest("test").waitForGreenStatus()
.timeout("5m") // sometimes due to cluster rebalacing and random settings default timeout is just not enough.
- .waitForRelocatingShards(0)).actionGet();
+ .waitForNoRelocatingShards(true)).actionGet();
if (health.isTimedOut()) {
logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
@@ -262,7 +262,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
indexRandom(true, builders);
ensureGreen();
- assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
// we have to flush at least once here since we don't corrupt the translog
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
@@ -347,7 +347,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
- if (corrupt.get() && action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
+ if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes;
int i = randomIntBetween(0, req.content().length() - 1);
@@ -408,7 +408,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
indexRandom(true, builders);
ensureGreen();
- assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
// we have to flush at least once here since we don't corrupt the translog
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
@@ -419,7 +419,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
- if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
+ if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
if (truncate && req.length() > 1) {
BytesRef bytesRef = req.content().toBytesRef();
@@ -473,7 +473,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
* TODO once checksum verification on snapshotting is implemented, this test needs to be fixed or split into several
* parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard.
*/
- @TestLogging("monitor.fs:DEBUG")
+ @TestLogging("org.elasticsearch.monitor.fs:DEBUG")
public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException {
int numDocs = scaledRandomIntBetween(100, 1000);
internalCluster().ensureAtLeastNumDataNodes(2);
@@ -491,7 +491,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
indexRandom(true, builders);
ensureGreen();
- assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
// we have to flush at least once here since we don't corrupt the translog
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
@@ -546,7 +546,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
indexRandom(true, builders);
ensureGreen();
- assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
// we have to flush at least once here since we don't corrupt the translog
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
diff --git a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java
index b43595b4b9..aa9de8de87 100644
--- a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java
+++ b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java
@@ -20,13 +20,20 @@ package org.elasticsearch.index.store;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.TransportShardBulkAction;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.test.ESIntegTestCase;
@@ -44,6 +51,7 @@ import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
@@ -53,7 +61,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2,
+ supportsDedicatedMasters = false, numClientNodes = 1, transportClientRatio = 0.0)
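+// fixed topology: two data nodes plus one coordinating-only client node, which the test below uses to send the bulk request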
public class ExceptionRetryIT extends ESIntegTestCase {
@Override
@@ -68,40 +77,46 @@ public class ExceptionRetryIT extends ESIntegTestCase {
}
/**
- * Tests retry mechanism when indexing. If an exception occurs when indexing then the indexing request is tried again before finally failing.
- * If auto generated ids are used this must not lead to duplicate ids
+ * Tests the retry mechanism when indexing. If an exception occurs during indexing, the request is tried again before finally
+ * failing. If auto-generated ids are used, this must not lead to duplicate ids;
* see https://github.com/elastic/elasticsearch/issues/8788
*/
public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
int numDocs = scaledRandomIntBetween(100, 1000);
+ Client client = internalCluster().coordOnlyNodeClient();
NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
- NodeStats unluckyNode = randomFrom(nodeStats.getNodes());
- assertAcked(client().admin().indices().prepareCreate("index"));
+ NodeStats unluckyNode = randomFrom(nodeStats.getNodes().stream().filter((s) -> s.getNode().isDataNode())
+ .collect(Collectors.toList()));
+ assertAcked(client().admin().indices().prepareCreate("index").setSettings(Settings.builder()
+ .put("index.number_of_replicas", 1)
+ .put("index.number_of_shards", 5)));
ensureGreen("index");
-
+ logger.info("unlucky node: {}", unluckyNode.getNode());
//create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry.
for (NodeStats dataNode : nodeStats.getNodes()) {
- MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
- mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+ MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class,
+ dataNode.getNode().getName()));
+ mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
+ new MockTransportService.DelegateTransport(mockTransportService.original()) {
@Override
- public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request,
+ TransportRequestOptions options) throws IOException, TransportException {
super.sendRequest(node, requestId, action, request, options);
- if (action.equals(TransportShardBulkAction.ACTION_NAME) && !exceptionThrown.get()) {
+ if (action.equals(TransportShardBulkAction.ACTION_NAME) && exceptionThrown.compareAndSet(false, true)) {
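+ // compareAndSet makes the check-and-mark atomic, so exactly one bulk shard request fails even when requests are sent concurrently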
logger.debug("Throw ConnectTransportException");
- exceptionThrown.set(true);
throw new ConnectTransportException(node, action);
}
}
});
}
- BulkRequestBuilder bulkBuilder = client().prepareBulk();
+ BulkRequestBuilder bulkBuilder = client.prepareBulk();
for (int i = 0; i < numDocs; i++) {
XContentBuilder doc = null;
doc = jsonBuilder().startObject().field("foo", "bar").endObject();
- bulkBuilder.add(client().prepareIndex("index", "type").setSource(doc));
+ bulkBuilder.add(client.prepareIndex("index", "type").setSource(doc));
}
BulkResponse response = bulkBuilder.get();
@@ -122,7 +137,8 @@ public class ExceptionRetryIT extends ESIntegTestCase {
for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
if (!uniqueIds.add(searchResponse.getHits().getHits()[i].getId())) {
if (!found_duplicate_already) {
- SearchResponse dupIdResponse = client().prepareSearch("index").setQuery(termQuery("_id", searchResponse.getHits().getHits()[i].getId())).setExplain(true).get();
+ SearchResponse dupIdResponse = client().prepareSearch("index").setQuery(termQuery("_id",
+ searchResponse.getHits().getHits()[i].getId())).setExplain(true).get();
assertThat(dupIdResponse.getHits().totalHits(), greaterThan(1L));
logger.info("found a duplicate id:");
for (SearchHit hit : dupIdResponse.getHits()) {
@@ -137,5 +153,16 @@ public class ExceptionRetryIT extends ESIntegTestCase {
assertSearchResponse(searchResponse);
assertThat(dupCounter, equalTo(0L));
assertHitCount(searchResponse, numDocs);
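+ // a retried auto-id request leaves an unsafe auto-id timestamp in the segment stats; scan all shards for the marker left by the retry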
+ IndicesStatsResponse index = client().admin().indices().prepareStats("index").clear().setSegments(true).get();
+ IndexStats indexStats = index.getIndex("index");
+ long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
+ for (IndexShardStats indexShardStats : indexStats) {
+ for (ShardStats shardStats : indexShardStats) {
+ SegmentsStats segments = shardStats.getStats().getSegments();
+ maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp());
+ }
+ }
+ assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get());
+ assertTrue("maxUnsafeAutoIdTimestamp must be > than 0 we have at least one retry", maxUnsafeAutoIdTimestamp > -1);
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java
index 25199caff9..a279767ea6 100644
--- a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java
+++ b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java
@@ -46,7 +46,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
@@ -354,49 +353,6 @@ public class StoreTests extends ESTestCase {
IOUtils.close(store);
}
- public void testRenameFile() throws IOException {
- final ShardId shardId = new ShardId("index", "_na_", 1);
- DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
- Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
- {
- IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
- int iters = scaledRandomIntBetween(10, 100);
- for (int i = 0; i < iters; i++) {
- BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
- output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
- }
- CodecUtil.writeFooter(output);
- output.close();
- }
- store.renameFile("foo.bar", "bar.foo");
- assertThat(numNonExtraFiles(store), is(1));
- final long lastChecksum;
- try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
- lastChecksum = CodecUtil.checksumEntireFile(input);
- }
-
- try {
- store.directory().openInput("foo.bar", IOContext.DEFAULT);
- fail("file was renamed");
- } catch (FileNotFoundException | NoSuchFileException ex) {
- // expected
- }
- {
- IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
- int iters = scaledRandomIntBetween(10, 100);
- for (int i = 0; i < iters; i++) {
- BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
- output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
- }
- CodecUtil.writeFooter(output);
- output.close();
- }
- store.renameFile("foo.bar", "bar.foo");
- assertThat(numNonExtraFiles(store), is(1));
- assertDeleteContent(store, directoryService);
- IOUtils.close(store);
- }
-
public void testCheckIntegrity() throws IOException {
Directory dir = newDirectory();
long luceneFileLength = 0;
@@ -519,9 +475,6 @@ public class StoreTests extends ESTestCase {
public LuceneManagedDirectoryService(Random random, boolean preventDoubleWrite) {
super(new ShardId(INDEX_SETTINGS.getIndex(), 1), INDEX_SETTINGS);
dir = StoreTests.newDirectory(random);
- if (dir instanceof MockDirectoryWrapper) {
- ((MockDirectoryWrapper) dir).setPreventDoubleWrite(preventDoubleWrite);
- }
this.random = random;
}
@@ -812,7 +765,7 @@ public class StoreTests extends ESTestCase {
initialStoreSize += store.directory().fileLength(extraFiles);
}
StoreStats stats = store.stats();
- assertEquals(stats.getSize().bytes(), initialStoreSize);
+ assertEquals(stats.getSize().getBytes(), initialStoreSize);
Directory dir = store.directory();
final long length;
@@ -963,11 +916,8 @@ public class StoreTests extends ESTestCase {
}
writer.commit();
writer.close();
- MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
- if (leaf != null) {
- leaf.setPreventDoubleWrite(false); // I do this on purpose
- }
SegmentInfos segmentCommitInfos = store.readLastCommittedSegmentsInfo();
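+ // with MockDirectoryWrapper's prevent-double-write hook gone, the segments file is deleted explicitly before being recreated below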
+ store.directory().deleteFile(segmentCommitInfos.getSegmentsFileName());
try (IndexOutput out = store.directory().createOutput(segmentCommitInfos.getSegmentsFileName(), IOContext.DEFAULT)) {
// empty file
}
diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index 12ac2910a4..cb76763363 100644
--- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -20,6 +20,8 @@
package org.elasticsearch.index.translog;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.Term;
import org.apache.lucene.mockfile.FilterFileChannel;
@@ -83,14 +85,12 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
-/**
- *
- */
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
public class TranslogTests extends ESTestCase {
@@ -207,53 +207,6 @@ public class TranslogTests extends ESTestCase {
return string;
}
- public void testRead() throws IOException {
- Location loc0 = translog.getLastWriteLocation();
- assertNotNull(loc0);
-
- Translog.Location loc1 = translog.add(new Translog.Index("test", "1", new byte[]{1}));
- assertThat(loc1, greaterThan(loc0));
- assertThat(translog.getLastWriteLocation(), greaterThan(loc1));
- Translog.Location loc2 = translog.add(new Translog.Index("test", "2", new byte[]{2}));
- assertThat(loc2, greaterThan(loc1));
- assertThat(translog.getLastWriteLocation(), greaterThan(loc2));
- assertThat(translog.read(loc1).getSource().source, equalTo(new BytesArray(new byte[]{1})));
- assertThat(translog.read(loc2).getSource().source, equalTo(new BytesArray(new byte[]{2})));
-
- Translog.Location lastLocBeforeSync = translog.getLastWriteLocation();
- translog.sync();
- assertEquals(lastLocBeforeSync, translog.getLastWriteLocation());
- assertThat(translog.read(loc1).getSource().source, equalTo(new BytesArray(new byte[]{1})));
- assertThat(translog.read(loc2).getSource().source, equalTo(new BytesArray(new byte[]{2})));
-
- Translog.Location loc3 = translog.add(new Translog.Index("test", "2", new byte[]{3}));
- assertThat(loc3, greaterThan(loc2));
- assertThat(translog.getLastWriteLocation(), greaterThan(loc3));
- assertThat(translog.read(loc3).getSource().source, equalTo(new BytesArray(new byte[]{3})));
-
- lastLocBeforeSync = translog.getLastWriteLocation();
- translog.sync();
- assertEquals(lastLocBeforeSync, translog.getLastWriteLocation());
- assertThat(translog.read(loc3).getSource().source, equalTo(new BytesArray(new byte[]{3})));
- translog.prepareCommit();
- /*
- * The commit adds to the lastWriteLocation even though it isn't really a write. This is just an implementation artifact but it can
- * safely be ignored because the lastWriteLocation continues to be greater than the Location returned from the last write operation
- * and less than the location of the next write operation.
- */
- assertThat(translog.getLastWriteLocation(), greaterThan(lastLocBeforeSync));
- assertThat(translog.read(loc3).getSource().source, equalTo(new BytesArray(new byte[]{3})));
- translog.commit();
- assertNull(translog.read(loc1));
- assertNull(translog.read(loc2));
- assertNull(translog.read(loc3));
- try {
- translog.read(new Translog.Location(translog.currentFileGeneration() + 1, 17, 35));
- fail("generation is greater than the current");
- } catch (IllegalStateException ex) {
- // expected
- }
- }
public void testSimpleOperations() throws IOException {
ArrayList<Translog.Operation> ops = new ArrayList<>();
@@ -345,7 +298,7 @@ public class TranslogTests extends ESTestCase {
assertThat(stats.estimatedNumberOfOperations(), equalTo(0L));
assertThat(stats.getTranslogSizeInBytes(), equalTo(firstOperationPosition));
assertEquals(6, total.estimatedNumberOfOperations());
- assertEquals(431, total.getTranslogSizeInBytes());
+ assertEquals(455, total.getTranslogSizeInBytes());
BytesStreamOutput out = new BytesStreamOutput();
total.writeTo(out);
@@ -353,14 +306,13 @@ public class TranslogTests extends ESTestCase {
copy.readFrom(out.bytes().streamInput());
assertEquals(6, copy.estimatedNumberOfOperations());
- assertEquals(431, copy.getTranslogSizeInBytes());
+ assertEquals(455, copy.getTranslogSizeInBytes());
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
builder.startObject();
copy.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
-
- assertEquals("{\"translog\":{\"operations\":6,\"size_in_bytes\":431}}", builder.string());
+ assertEquals("{\"translog\":{\"operations\":6,\"size_in_bytes\":455}}", builder.string());
}
try {
@@ -443,7 +395,7 @@ public class TranslogTests extends ESTestCase {
assertFalse("translog [" + id + "] still exists", Files.exists(translog.location().resolve(Translog.getFilename(id))));
}
- static class LocationOperation {
+ static class LocationOperation implements Comparable<LocationOperation> {
final Translog.Operation operation;
final Translog.Location location;
@@ -452,6 +404,10 @@ public class TranslogTests extends ESTestCase {
this.location = location;
}
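+ // order by translog location so written operations can be compared against snapshot iteration order, as in the test below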
+ @Override
+ public int compareTo(LocationOperation o) {
+ return location.compareTo(o.location);
+ }
}
public void testConcurrentWritesWithVaryingSize() throws Throwable {
@@ -480,8 +436,12 @@ public class TranslogTests extends ESTestCase {
threads[i].join(60 * 1000);
}
- for (LocationOperation locationOperation : writtenOperations) {
- Translog.Operation op = translog.read(locationOperation.location);
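+ // read-by-location is gone; a snapshot returns operations in write order, so sort the recorded operations by location before comparing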
+ List<LocationOperation> collect = writtenOperations.stream().collect(Collectors.toList());
+ Collections.sort(collect);
+ Translog.Snapshot snapshot = translog.newSnapshot();
+ for (LocationOperation locationOperation : collect) {
+ Translog.Operation op = snapshot.next();
+ assertNotNull(op);
Translog.Operation expectedOp = locationOperation.operation;
assertEquals(expectedOp.opType(), op.opType());
switch (op.opType()) {
@@ -507,6 +467,7 @@ public class TranslogTests extends ESTestCase {
}
}
+ assertNull(snapshot.next());
}
@@ -523,13 +484,16 @@ public class TranslogTests extends ESTestCase {
corruptTranslogs(translogDir);
AtomicInteger corruptionsCaught = new AtomicInteger(0);
+ Translog.Snapshot snapshot = translog.newSnapshot();
for (Translog.Location location : locations) {
try {
- translog.read(location);
+ Translog.Operation next = snapshot.next();
+ assertNotNull(next);
} catch (TranslogCorruptedException e) {
corruptionsCaught.incrementAndGet();
}
}
+ expectThrows(TranslogCorruptedException.class, () -> snapshot.next());
assertThat("at least one corruption was caused and caught", corruptionsCaught.get(), greaterThanOrEqualTo(1));
}
@@ -546,15 +510,12 @@ public class TranslogTests extends ESTestCase {
truncateTranslogs(translogDir);
AtomicInteger truncations = new AtomicInteger(0);
+ Translog.Snapshot snap = translog.newSnapshot();
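+ // truncation now surfaces as a bare EOFException from snapshot.next() instead of an ElasticsearchException wrapping one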
for (Translog.Location location : locations) {
try {
- translog.read(location);
- } catch (ElasticsearchException e) {
- if (e.getCause() instanceof EOFException) {
- truncations.incrementAndGet();
- } else {
- throw e;
- }
+ assertNotNull(snap.next());
+ } catch (EOFException e) {
+ truncations.incrementAndGet();
}
}
assertThat("at least one truncation was caused and caught", truncations.get(), greaterThanOrEqualTo(1));
@@ -670,7 +631,7 @@ public class TranslogTests extends ESTestCase {
@Override
public void onFailure(Exception e) {
- logger.error("--> writer [{}] had an error", e, threadName);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e);
errors.add(e);
}
}, threadName);
@@ -685,7 +646,7 @@ public class TranslogTests extends ESTestCase {
@Override
public void onFailure(Exception e) {
- logger.error("--> reader [{}] had an error", e, threadId);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e);
errors.add(e);
try {
closeView();
@@ -813,6 +774,38 @@ public class TranslogTests extends ESTestCase {
}
}
+ public void testSyncUpToStream() throws IOException {
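+ // ensureSynced(Stream) should return true only if it actually had to sync at least one of the given locations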
+ int iters = randomIntBetween(5, 10);
+ for (int i = 0; i < iters; i++) {
+ int translogOperations = randomIntBetween(10, 100);
+ int count = 0;
+ ArrayList<Location> locations = new ArrayList<>();
+ for (int op = 0; op < translogOperations; op++) {
+ if (rarely()) {
+ translog.commit(); // do this first so that there is at least one pending tlog entry
+ }
+ final Translog.Location location = translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
+ locations.add(location);
+ }
+ Collections.shuffle(locations, random());
+ if (randomBoolean()) {
+ assertTrue("at least one operation pending", translog.syncNeeded());
+ assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream()));
+ assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
+ } else if (rarely()) {
+ translog.commit();
+ assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now
+ assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
+ } else {
+ translog.sync();
+ assertFalse("translog has been synced already", translog.ensureSynced(locations.stream()));
+ }
+ for (Location location : locations) {
+ assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location));
+ }
+ }
+ }
+
public void testLocationComparison() throws IOException {
List<Translog.Location> locations = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
@@ -830,8 +823,14 @@ public class TranslogTests extends ESTestCase {
}
assertEquals(max.generation, translog.currentFileGeneration());
- final Translog.Operation read = translog.read(max);
- assertEquals(read.getSource().source.utf8ToString(), Integer.toString(count));
+ Translog.Snapshot snap = translog.newSnapshot();
+ Translog.Operation next;
+ Translog.Operation maxOp = null;
+ while ((next = snap.next()) != null) {
+ maxOp = next;
+ }
+ assertNotNull(maxOp);
+ assertEquals(maxOp.getSource().source.utf8ToString(), Integer.toString(count));
}
public static Translog.Location max(Translog.Location a, Translog.Location b) {
@@ -854,30 +853,24 @@ public class TranslogTests extends ESTestCase {
}
}
assertEquals(translogOperations, translog.totalOperations());
- final Translog.Location lastLocation = translog.add(new Translog.Index("test", "" + translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME));
try (final TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
assertEquals(lastSynced + 1, reader.totalOperations());
+ Translog.Snapshot snapshot = reader.newSnapshot();
+
for (int op = 0; op < translogOperations; op++) {
- Translog.Location location = locations.get(op);
if (op <= lastSynced) {
- final Translog.Operation read = reader.read(location);
+ final Translog.Operation read = snapshot.next();
assertEquals(Integer.toString(op), read.getSource().source.utf8ToString());
} else {
- try {
- reader.read(location);
- fail("read past checkpoint");
- } catch (EOFException ex) {
-
- }
+ Translog.Operation next = snapshot.next();
+ assertNull(next);
}
}
- try {
- reader.read(lastLocation);
- fail("read past checkpoint");
- } catch (EOFException ex) {
- }
+ Translog.Operation next = snapshot.next();
+ assertNull(next);
}
assertEquals(translogOperations + 1, translog.totalOperations());
translog.close();
@@ -1135,7 +1128,7 @@ public class TranslogTests extends ESTestCase {
try (Translog translog = new Translog(config, translogGeneration)) {
fail("corrupted");
} catch (IllegalStateException ex) {
- assertEquals(ex.getMessage(), "Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=2683, numOps=55, translogFileGeneration= 2} but got: Checkpoint{offset=0, numOps=0, translogFileGeneration= 0}");
+ assertEquals(ex.getMessage(), "Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, numOps=55, translogFileGeneration= 2} but got: Checkpoint{offset=0, numOps=0, translogFileGeneration= 0}");
}
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
try (Translog translog = new Translog(config, translogGeneration)) {
@@ -1588,11 +1581,6 @@ public class TranslogTests extends ESTestCase {
}
};
}
-
- @Override
- protected boolean assertBytesAtLocation(Location location, BytesReference expectedBytes) throws IOException {
- return true; // we don't wanna fail in the assert
- }
};
}
@@ -1925,4 +1913,8 @@ public class TranslogTests extends ESTestCase {
IOUtils.close(view);
translog = new Translog(config, generation);
}
+
+ public static Translog.Location randomTranslogLocation() {
+ return new Translog.Location(randomLong(), randomLong(), randomInt());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java
index a174f80775..1fab2a3808 100644
--- a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java
+++ b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java
@@ -18,16 +18,26 @@
*/
package org.elasticsearch.indexing;
+import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.indices.InvalidIndexNameException;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Random;
@@ -36,7 +46,9 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicIntegerArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -227,4 +239,27 @@ public class IndexActionIT extends ESIntegTestCase {
}
}
+ public void testDocumentWithBlankFieldName() {
+ MapperParsingException e = expectThrows(MapperParsingException.class,
+ () -> client().prepareIndex("test", "type", "1").setSource("", "value1_2").execute().actionGet());
+ assertThat(e.getMessage(), containsString("failed to parse"));
+ assertThat(e.getRootCause().getMessage(), containsString("name cannot be empty string"));
+ }
+
+ @Override
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return Collections.singleton(InternalSettingsPlugin.class); // uses index.version.created
+ }
+
+ public void testDocumentWithBlankFieldName2x() {
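+ // indices created on 2.x still accept blank field names, so the same document that fails above must index successfully here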
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_4);
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
+ assertAcked(prepareCreate("test1").setSettings(settings));
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test1", "type", "1").setSource("", "value1_2").execute().actionGet();
+ assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java
index f5bf152b59..27f3bfb123 100644
--- a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java
+++ b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java
@@ -93,7 +93,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
// explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
client().admin().cluster().prepareReroute().execute().actionGet();
- clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForRelocatingShards(0)).actionGet();
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true)).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
@@ -130,7 +130,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
// explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
client().admin().cluster().prepareReroute().execute().actionGet();
- clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForRelocatingShards(0)).actionGet();
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForNoRelocatingShards(true)).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
@@ -171,7 +171,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
client().admin().cluster().prepareReroute().get();
- clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNoRelocatingShards(true).waitForNodes("2")).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
diff --git a/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java
index e91ed066cc..1ec4fa0f4f 100644
--- a/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java
@@ -20,11 +20,13 @@
package org.elasticsearch.indices;
import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.test.ESIntegTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@@ -45,6 +47,12 @@ public class DateMathIndexExpressionsIntegrationIT extends ESIntegTestCase {
String index3 = ".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now.minusDays(2));
createIndex(index1, index2, index3);
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(index1, index2, index3).get();
+ assertEquals(index1, getSettingsResponse.getSetting(index1, IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+ assertEquals(index2, getSettingsResponse.getSetting(index2, IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+ assertEquals(index3, getSettingsResponse.getSetting(index3, IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+
String dateMathExp1 = "<.marvel-{now/d}>";
String dateMathExp2 = "<.marvel-{now/d-1d}>";
String dateMathExp3 = "<.marvel-{now/d-2d}>";
@@ -122,6 +130,12 @@ public class DateMathIndexExpressionsIntegrationIT extends ESIntegTestCase {
String dateMathExp3 = "<.marvel-{now/d-2d}>";
createIndex(dateMathExp1, dateMathExp2, dateMathExp3);
+
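+ // SETTING_INDEX_PROVIDED_NAME keeps the name the index was created with; here that is the unresolved date math expression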
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(index1, index2, index3).get();
+ assertEquals(dateMathExp1, getSettingsResponse.getSetting(index1, IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+ assertEquals(dateMathExp2, getSettingsResponse.getSetting(index2, IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+ assertEquals(dateMathExp3, getSettingsResponse.getSetting(index3, IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
+
ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
assertThat(clusterState.metaData().index(index1), notNullValue());
assertThat(clusterState.metaData().index(index2), notNullValue());
diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
index b7eb833106..2d0e4a3aeb 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
@@ -30,7 +30,7 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.IndexShardTests;
+import org.elasticsearch.index.shard.IndexShardIT;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.threadpool.ThreadPool;
@@ -442,13 +442,13 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
shard.writeIndexingBuffer();
}
};
- final IndexShard newShard = IndexShardTests.newIndexShard(indexService, shard, wrapper, imc);
+ final IndexShard newShard = IndexShardIT.newIndexShard(indexService, shard, wrapper, imc);
shardRef.set(newShard);
try {
assertEquals(0, imc.availableShards().size());
ShardRouting routing = newShard.routingEntry();
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
- newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode));
+ newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null));
assertEquals(1, imc.availableShards().size());
assertTrue(newShard.recoverFromStore());
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
index 17a4b93c24..66d6c16b4e 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
@@ -21,6 +21,7 @@ package org.elasticsearch.indices;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.cluster.routing.UnassignedInfo;
@@ -99,13 +100,15 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas
idx = index.index();
ShardRouting newRouting = shardRouting;
String nodeId = newRouting.currentNodeId();
- newRouting = ShardRoutingHelper.moveToUnassigned(newRouting, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom"));
+ UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom");
+ newRouting = newRouting.moveToUnassigned(unassignedInfo)
+ .updateUnassigned(unassignedInfo, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE);
newRouting = ShardRoutingHelper.initialize(newRouting, nodeId);
IndexShard shard = index.createShard(newRouting);
shard.updateRoutingEntry(newRouting);
final DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(),
emptyMap(), emptySet(), Version.CURRENT);
- shard.markAsRecovering("store", new RecoveryState(shard.shardId(), newRouting.primary(), RecoveryState.Type.SNAPSHOT, newRouting.restoreSource(), localNode));
+ shard.markAsRecovering("store", new RecoveryState(newRouting, localNode, null));
shard.recoverFromStore();
newRouting = ShardRoutingHelper.moveToStarted(newRouting);
shard.updateRoutingEntry(newRouting);
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java
index d43217d978..8bb8a4ddf8 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java
@@ -31,7 +31,6 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
@@ -216,7 +215,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
IOUtils.close(reader, secondReader, writer, dir, cache);
}
IndicesRequestCache cache = new IndicesRequestCache(Settings.builder()
- .put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.bytes()+1 +"b")
+ .put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes()+1 +"b")
.build());
AtomicBoolean indexShard = new AtomicBoolean(true);
ShardRequestCache requestCacheStats = new ShardRequestCache();
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
index 216ddf76f6..577b20c1fa 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
@@ -18,12 +18,9 @@
*/
package org.elasticsearch.indices;
-import org.apache.lucene.store.LockObtainFailedException;
import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -250,14 +247,11 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
public void testDanglingIndicesWithAliasConflict() throws Exception {
final String indexName = "test-idx1";
final String alias = "test-alias";
- final IndicesService indicesService = getIndicesService();
final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
- final IndexService test = createIndex(indexName);
+ createIndex(indexName);
// create the alias for the index
- AliasAction action = new AliasAction(AliasAction.Type.ADD, indexName, alias);
- IndicesAliasesRequest request = new IndicesAliasesRequest().addAliasAction(action);
- client().admin().indices().aliases(request).actionGet();
+ client().admin().indices().prepareAliases().addAlias(indexName, alias).get();
final ClusterState originalState = clusterService.state();
// try to import a dangling index with the same name as the alias, it should fail
@@ -276,9 +270,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
assertThat(clusterService.state(), equalTo(originalState));
// remove the alias
- action = new AliasAction(AliasAction.Type.REMOVE, indexName, alias);
- request = new IndicesAliasesRequest().addAliasAction(action);
- client().admin().indices().aliases(request).actionGet();
+ client().admin().indices().prepareAliases().removeAlias(indexName, alias).get();
// now try importing a dangling index with the same name as the alias, it should succeed.
listener = new DanglingListener();
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
index 33763571da..7baab0142f 100644
--- a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
@@ -41,9 +41,9 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.Analysis;
import org.elasticsearch.index.analysis.AnalysisRegistry;
-import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
import org.elasticsearch.index.analysis.CustomAnalyzer;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.PatternReplaceCharFilterFactory;
@@ -79,11 +79,11 @@ import static org.hamcrest.Matchers.is;
*/
public class AnalysisModuleTests extends ModuleTestCase {
- public AnalysisService getAnalysisService(Settings settings) throws IOException {
- return getAnalysisService(getNewRegistry(settings), settings);
+ public IndexAnalyzers getIndexAnalyzers(Settings settings) throws IOException {
+ return getIndexAnalyzers(getNewRegistry(settings), settings);
}
- public AnalysisService getAnalysisService(AnalysisRegistry registry, Settings settings) throws IOException {
+ public IndexAnalyzers getIndexAnalyzers(AnalysisRegistry registry, Settings settings) throws IOException {
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
return registry.build(idxSettings);
}
@@ -136,9 +136,9 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
- AnalysisService as = getAnalysisService(newRegistry, settings);
- assertThat(as.analyzer("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
- assertThat(as.analyzer("default_search").analyzer(), is(instanceOf(EnglishAnalyzer.class)));
+ IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings);
+ assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
+ assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(EnglishAnalyzer.class)));
}
public void testAnalyzerAliasReferencesAlias() throws IOException {
@@ -152,10 +152,11 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
- AnalysisService as = getAnalysisService(newRegistry, settings);
- assertThat(as.analyzer("default").analyzer(), is(instanceOf(GermanAnalyzer.class)));
+ IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings);
+
+ assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(GermanAnalyzer.class)));
// analyzer types are bound early before we resolve aliases
- assertThat(as.analyzer("default_search").analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(StandardAnalyzer.class)));
}
public void testAnalyzerAliasDefault() throws IOException {
@@ -167,9 +168,9 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
- AnalysisService as = getAnalysisService(newRegistry, settings);
- assertThat(as.analyzer("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
- assertThat(as.analyzer("default_search").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
+ IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings);
+ assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
+ assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
}
public void testAnalyzerAliasMoreThanOnce() throws IOException {
@@ -183,7 +184,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings);
- IllegalStateException ise = expectThrows(IllegalStateException.class, () -> getAnalysisService(newRegistry, settings));
+ IllegalStateException ise = expectThrows(IllegalStateException.class, () -> getIndexAnalyzers(newRegistry, settings));
assertEquals("alias [default] is already used by [foobar]", ise.getMessage());
}
@@ -192,11 +193,11 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put("index.analysis.analyzer.foobar.type", "standard")
.put("index.analysis.analyzer.foobar.alias","foobaz")
// analyzer aliases were removed in v5.0.0 alpha6
- .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha6, null))
+ .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_beta1, null))
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
AnalysisRegistry registry = getNewRegistry(settings);
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> getAnalysisService(registry, settings));
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> getIndexAnalyzers(registry, settings));
assertEquals("setting [index.analysis.analyzer.foobar.alias] is not supported", e.getMessage());
}
@@ -208,7 +209,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
.build();
AnalysisRegistry newRegistry = getNewRegistry(settings2);
- AnalysisService analysisService2 = getAnalysisService(newRegistry, settings2);
+ IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings2);
// registry always has the current version
assertThat(newRegistry.getAnalyzer("default"), is(instanceOf(NamedAnalyzer.class)));
@@ -217,20 +218,20 @@ public class AnalysisModuleTests extends ModuleTestCase {
assertEquals(Version.CURRENT.luceneVersion, defaultNamedAnalyzer.analyzer().getVersion());
// analysis service has the expected version
- assertThat(analysisService2.analyzer("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
- assertEquals(Version.V_2_0_0.luceneVersion, analysisService2.analyzer("standard").analyzer().getVersion());
- assertEquals(Version.V_2_0_0.luceneVersion, analysisService2.analyzer("thai").analyzer().getVersion());
+ assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion());
+ assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("thai").analyzer().getVersion());
- assertThat(analysisService2.analyzer("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class)));
- assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), analysisService2.analyzer("custom7").analyzer().getVersion());
+ assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), indexAnalyzers.get("custom7").analyzer().getVersion());
}
private void assertTokenFilter(String name, Class<?> clazz) throws IOException {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
- AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
- TokenFilterFactory tokenFilter = analysisService.tokenFilter(name);
+ TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get(name);
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader("foo bar"));
TokenStream stream = tokenFilter.create(tokenizer);
@@ -238,8 +239,8 @@ public class AnalysisModuleTests extends ModuleTestCase {
}
private void testSimpleConfiguration(Settings settings) throws IOException {
- AnalysisService analysisService = getAnalysisService(settings);
- Analyzer analyzer = analysisService.analyzer("custom1").analyzer();
+ IndexAnalyzers indexAnalyzers = getIndexAnalyzers(settings);
+ Analyzer analyzer = indexAnalyzers.get("custom1").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom1 = (CustomAnalyzer) analyzer;
@@ -249,23 +250,23 @@ public class AnalysisModuleTests extends ModuleTestCase {
StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0];
assertThat(stop1.stopWords().size(), equalTo(1));
- analyzer = analysisService.analyzer("custom2").analyzer();
+ analyzer = indexAnalyzers.get("custom2").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
// verify position increment gap
- analyzer = analysisService.analyzer("custom6").analyzer();
+ analyzer = indexAnalyzers.get("custom6").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom6 = (CustomAnalyzer) analyzer;
assertThat(custom6.getPositionIncrementGap("any_string"), equalTo(256));
// verify characters mapping
- analyzer = analysisService.analyzer("custom5").analyzer();
+ analyzer = indexAnalyzers.get("custom5").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom5 = (CustomAnalyzer) analyzer;
assertThat(custom5.charFilters()[0], instanceOf(MappingCharFilterFactory.class));
// check custom pattern replace filter
- analyzer = analysisService.analyzer("custom3").analyzer();
+ analyzer = indexAnalyzers.get("custom3").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
PatternReplaceCharFilterFactory patternReplaceCharFilterFactory = (PatternReplaceCharFilterFactory) custom3.charFilters()[0];
@@ -273,7 +274,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
assertThat(patternReplaceCharFilterFactory.getReplacement(), equalTo("replacedSample $1"));
// check custom class name (my)
- analyzer = analysisService.analyzer("custom4").analyzer();
+ analyzer = indexAnalyzers.get("custom4").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom4 = (CustomAnalyzer) analyzer;
assertThat(custom4.tokenFilters()[0], instanceOf(MyFilterTokenFilterFactory.class));
@@ -333,7 +334,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, "1")
.build();
try {
- getAnalysisService(settings);
+ getIndexAnalyzers(settings);
fail("This should fail with IllegalArgumentException because the analyzers name starts with _");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), either(equalTo("analyzer name must not start with '_'. got \"_invalid_name\""))
@@ -350,7 +351,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
.build();
try {
- getAnalysisService(settings);
+ getIndexAnalyzers(settings);
fail("This should fail with IllegalArgumentException because the analyzers alias starts with _");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("analyzer name must not start with '_'. got \"_invalid_name\""));
@@ -365,7 +366,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
try {
- getAnalysisService(settings);
+ getIndexAnalyzers(settings);
fail("Analyzer should fail if it has position_offset_gap");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("Option [position_offset_gap] in Custom Analyzer [custom] " +
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
index 2b19b01f2c..61b5d2eb31 100644
--- a/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
@@ -19,7 +19,7 @@
package org.elasticsearch.indices.analysis;
-import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
+import org.apache.lucene.analysis.StopwordAnalyzerBase;
public class DummyAnalyzer extends StopwordAnalyzerBase {
diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java
index 8e63653dfa..cd2c34e510 100644
--- a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java
@@ -32,7 +32,6 @@ import java.util.Map;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.startsWith;
@@ -76,18 +75,11 @@ public class AnalyzeActionIT extends ESIntegTestCase {
assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "long", "type=long", "double", "type=double"));
ensureGreen("test");
- try {
- client().admin().indices().prepareAnalyze(indexOrAlias(), "123").setField("long").get();
- fail("shouldn't get here");
- } catch (IllegalArgumentException ex) {
- //all good
- }
- try {
- client().admin().indices().prepareAnalyze(indexOrAlias(), "123.0").setField("double").get();
- fail("shouldn't get here");
- } catch (IllegalArgumentException ex) {
- //all good
- }
+ expectThrows(IllegalArgumentException.class,
+ () -> client().admin().indices().prepareAnalyze(indexOrAlias(), "123").setField("long").get());
+
+ expectThrows(IllegalArgumentException.class,
+ () -> client().admin().indices().prepareAnalyze(indexOrAlias(), "123.0").setField("double").get());
}
public void testAnalyzeWithNoIndex() throws Exception {
@@ -450,18 +442,13 @@ public class AnalyzeActionIT extends ESIntegTestCase {
}
public void testNonExistTokenizer() {
- try {
- AnalyzeResponse analyzeResponse = client().admin().indices()
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> client().admin().indices()
.prepareAnalyze("this is a test")
.setAnalyzer("not_exist_analyzer")
- .get();
- fail("shouldn't get here");
- } catch (Exception e) {
- assertThat(e, instanceOf(IllegalArgumentException.class));
- assertThat(e.getMessage(), startsWith("failed to find global analyzer"));
-
- }
-
+ .get()
+ );
+ assertThat(e.getMessage(), startsWith("failed to find global analyzer"));
}
public void testCustomTokenFilterInRequest() throws Exception {
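The AnalyzeActionIT change above is a cleanup that recurs throughout this commit: try/fail/catch blocks become ESTestCase.expectThrows, which returns the thrown exception so its message can be asserted directly. The idiom in isolation (doSomethingInvalid() is a hypothetical stand-in for the client call):

    // Old shape:
    // try {
    //     doSomethingInvalid();
    //     fail("shouldn't get here");
    // } catch (IllegalArgumentException ex) { /* all good */ }

    // New shape: one expression, and the exception is available for further checks.
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> doSomethingInvalid());
    assertThat(e.getMessage(), startsWith("failed to find global analyzer"));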
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
index 034cbeb636..19b55e2c77 100644
--- a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
@@ -37,10 +37,10 @@ import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndex;
-import org.elasticsearch.indices.cluster.IndicesClusterStateService.Shard;
import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService.Shard;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
@@ -55,7 +55,9 @@ import java.util.concurrent.ConcurrentMap;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
/**
* Abstract base class for tests against {@link IndicesClusterStateService}
@@ -87,7 +89,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId());
if (localRoutingNode != null) {
if (enableRandomFailures == false) {
- assertTrue("failed shard cache should be empty", failedShardsCache.isEmpty());
+ assertThat("failed shard cache should be empty", failedShardsCache.values(), empty());
}
// check that all shards in local routing nodes have been allocated
for (ShardRouting shardRouting : localRoutingNode) {
@@ -207,8 +209,9 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
@Override
public MockIndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState,
- RecoveryTargetService recoveryTargetService,
- RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+ PeerRecoveryTargetService recoveryTargetService,
+ PeerRecoveryTargetService.RecoveryListener recoveryListener,
+ RepositoriesService repositoriesService,
NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure)
throws IOException {
failRandomly();
@@ -260,6 +263,9 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
@Override
public void updateMetaData(IndexMetaData indexMetaData) {
indexSettings.updateIndexMetaData(indexMetaData);
+ for (MockIndexShard shard: shards.values()) {
+ shard.updateTerm(indexMetaData.primaryTerm(shard.shardId().id()));
+ }
}
@Override
@@ -269,7 +275,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
public synchronized MockIndexShard createShard(ShardRouting routing) throws IOException {
failRandomly();
- MockIndexShard shard = new MockIndexShard(routing);
+ MockIndexShard shard = new MockIndexShard(routing, indexSettings.getIndexMetaData().primaryTerm(routing.shardId().id()));
shards = newMapBuilder(shards).put(routing.id(), shard).immutableMap();
return shard;
}
@@ -302,9 +308,11 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
protected class MockIndexShard implements IndicesClusterStateService.Shard {
private volatile ShardRouting shardRouting;
private volatile RecoveryState recoveryState;
+ private volatile long term;
- public MockIndexShard(ShardRouting shardRouting) {
+ public MockIndexShard(ShardRouting shardRouting, long term) {
this.shardRouting = shardRouting;
+ this.term = term;
}
@Override
@@ -330,9 +338,21 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
@Override
public void updateRoutingEntry(ShardRouting shardRouting) throws IOException {
failRandomly();
- assert this.shardId().equals(shardRouting.shardId());
- assert this.shardRouting.isSameAllocation(shardRouting);
+ assertThat(this.shardId(), equalTo(shardRouting.shardId()));
+ assertTrue("current: " + this.shardRouting + ", got: " + shardRouting, this.shardRouting.isSameAllocation(shardRouting));
+ if (this.shardRouting.active()) {
+ assertTrue("and active shard must stay active, current: " + this.shardRouting + ", got: " + shardRouting,
+ shardRouting.active());
+ }
this.shardRouting = shardRouting;
}
+
+ public void updateTerm(long newTerm) {
+ assertThat("term can only be incremented: " + shardRouting, newTerm, greaterThanOrEqualTo(term));
+ if (shardRouting.primary() && shardRouting.active()) {
+ assertThat("term can not be changed on an active primary shard: " + shardRouting, newTerm, equalTo(term));
+ }
+ this.term = newTerm;
+ }
}
}
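The MockIndexShard changes above add a primary-term invariant: terms may only grow, and the term of an active primary must not change while it stays active. A standalone plain-Java illustration of that rule (TermTracker is hypothetical, not part of the commit):

    // Enforces the same rule as MockIndexShard.updateTerm in the hunk above.
    final class TermTracker {
        private long term;
        private final boolean activePrimary;

        TermTracker(long initialTerm, boolean activePrimary) {
            this.term = initialTerm;
            this.activePrimary = activePrimary;
        }

        void update(long newTerm) {
            if (newTerm < term) {
                throw new AssertionError("term can only be incremented: " + term + " -> " + newTerm);
            }
            if (activePrimary && newTerm != term) {
                throw new AssertionError("term can not be changed on an active primary shard");
            }
            term = newTerm;
        }
    }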
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
index c3429edc39..dcadd60cc7 100644
--- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
@@ -53,9 +53,8 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RandomAllocationDeciderTests;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
@@ -71,7 +70,7 @@ import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
@@ -117,7 +116,7 @@ public class ClusterStateChanges extends AbstractComponent {
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings),
new ReplicaAfterPrimaryActiveAllocationDecider(settings),
new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings),
+ new TestGatewayAllocator(), new BalancedShardsAllocator(settings),
EmptyClusterInfoService.INSTANCE);
shardFailedClusterStateTaskExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger);
shardStartedClusterStateTaskExecutor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, logger);
@@ -150,7 +149,8 @@ public class ClusterStateChanges extends AbstractComponent {
}
// services
- TransportService transportService = new TransportService(settings, transport, threadPool);
+ TransportService transportService = new TransportService(settings, transport, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, null, null) {
// metaData upgrader should do nothing
@Override
@@ -165,7 +165,7 @@ public class ClusterStateChanges extends AbstractComponent {
MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(settings, clusterService,
allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService, nodeServicesProvider);
MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(settings, clusterService, indicesService,
- allocationService, new AliasValidator(settings), Collections.emptySet(), environment,
+ allocationService, new AliasValidator(settings), environment,
nodeServicesProvider, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool);
transportCloseIndexAction = new TransportCloseIndexAction(settings, transportService, clusterService, threadPool,
@@ -207,14 +207,13 @@ public class ClusterStateChanges extends AbstractComponent {
}
public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
- RoutingAllocation.Result rerouteResult = allocationService.deassociateDeadNodes(clusterState, reroute, reason);
- return ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+ return allocationService.deassociateDeadNodes(clusterState, reroute, reason);
}
- public ClusterState applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
+ public ClusterState applyFailedShards(ClusterState clusterState, List<FailedShard> failedShards) {
List<ShardStateAction.ShardEntry> entries = failedShards.stream().map(failedShard ->
- new ShardStateAction.ShardEntry(failedShard.routingEntry.shardId(), failedShard.routingEntry.allocationId().getId(),
- 0L, failedShard.message, failedShard.failure))
+ new ShardStateAction.ShardEntry(failedShard.getRoutingEntry().shardId(), failedShard.getRoutingEntry().allocationId().getId(),
+ 0L, failedShard.getMessage(), failedShard.getFailure()))
.collect(Collectors.toList());
try {
return shardFailedClusterStateTaskExecutor.execute(clusterState, entries).resultingState;
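FailedRerouteAllocation.FailedShard becomes a top-level FailedShard value type with accessors instead of public fields. The conversion above, pulled out of the diff for readability (imports and surrounding types as in the file):

    // Public field reads become accessor calls; the stream pipeline is unchanged.
    List<ShardStateAction.ShardEntry> entries = failedShards.stream()
        .map(failedShard -> new ShardStateAction.ShardEntry(
            failedShard.getRoutingEntry().shardId(),
            failedShard.getRoutingEntry().allocationId().getId(),
            0L, failedShard.getMessage(), failedShard.getFailure()))
        .collect(Collectors.toList());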
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
index 44231f29ba..48a105874f 100644
--- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.cluster;
+import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
@@ -38,14 +39,14 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.FailedShard;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -62,6 +63,7 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
@@ -78,7 +80,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
// we have an IndicesClusterStateService per node in the cluster
final Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap = new HashMap<>();
ClusterState state = randomInitialClusterState(clusterStateServiceMap, MockIndicesService::new);
-
// each of the following iterations represents a new cluster state update processed on all nodes
for (int i = 0; i < 30; i++) {
logger.info("Iteration {}", i);
@@ -86,7 +87,14 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
// calculate new cluster state
for (int j = 0; j < randomInt(3); j++) { // multiple iterations to simulate batching of cluster states
- state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new);
+ try {
+ state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new);
+ } catch (AssertionError error) {
+ ClusterState finalState = state;
+ logger.error((org.apache.logging.log4j.util.Supplier<?>) () ->
+ new ParameterizedMessage("failed to random change state. last good state: \n{}", finalState.prettyPrint()), error);
+ throw error;
+ }
}
// apply cluster state to nodes (incl. master)
@@ -94,7 +102,15 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
ClusterState localState = adaptClusterStateToLocalNode(state, node);
ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node);
- indicesClusterStateService.clusterChanged(new ClusterChangedEvent("simulated change " + i, localState, previousLocalState));
+ final ClusterChangedEvent event = new ClusterChangedEvent("simulated change " + i, localState, previousLocalState);
+ try {
+ indicesClusterStateService.clusterChanged(event);
+ } catch (AssertionError error) {
+ logger.error((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+ "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}",
+ node, event.previousState().prettyPrint(), event.state().prettyPrint()), error);
+ throw error;
+ }
// check that cluster state has been properly applied to node
assertClusterStateMatchesNodeState(localState, indicesClusterStateService);
@@ -137,7 +153,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
// simulate the cluster state change on the node
ClusterState localState = adaptClusterStateToLocalNode(stateWithIndex, node);
ClusterState previousLocalState = adaptClusterStateToLocalNode(initialState, node);
- IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(RecordingIndicesService::new);
+ IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(node, RecordingIndicesService::new);
indicesCSSvc.start();
indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState));
@@ -183,7 +199,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
Supplier<MockIndicesService> indicesServiceSupplier) {
for (DiscoveryNode node : state.nodes()) {
clusterStateServiceMap.computeIfAbsent(node, discoveryNode -> {
- IndicesClusterStateService ics = createIndicesClusterStateService(indicesServiceSupplier);
+ IndicesClusterStateService ics = createIndicesClusterStateService(discoveryNode, indicesServiceSupplier);
ics.start();
return ics;
});
@@ -206,10 +222,14 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
break;
}
String name = "index_" + randomAsciiOfLength(15).toLowerCase(Locale.ROOT);
- CreateIndexRequest request = new CreateIndexRequest(name, Settings.builder()
+ Settings.Builder settingsBuilder = Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3))
- .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2))
- .build()).waitForActiveShards(ActiveShardCount.NONE);
+ .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
+ if (randomBoolean()) {
+ settingsBuilder.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+ .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true);
+ }
+ CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE);
state = cluster.createIndex(state, request);
assertTrue(state.metaData().hasIndex(name));
}
@@ -309,6 +329,13 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
state = cluster.deassociateDeadNodes(state, true, "removed and added a node");
updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
}
+ if (randomBoolean()) {
+ // and add it back
+ DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(discoveryNode).build();
+ state = ClusterState.builder(state).nodes(newNodes).build();
+ state = cluster.reroute(state, new ClusterRerouteRequest());
+ updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
+ }
}
}
}
@@ -318,12 +345,15 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
return state;
}
+ private static final AtomicInteger nodeIdGenerator = new AtomicInteger();
+
protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) {
Set<DiscoveryNode.Role> roles = new HashSet<>(randomSubsetOf(Sets.newHashSet(DiscoveryNode.Role.values())));
for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) {
roles.add(mustHaveRole);
}
- return new DiscoveryNode("node_" + randomAsciiOfLength(8), LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles,
+ final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet());
+ return new DiscoveryNode(id, id, LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles,
Version.CURRENT);
}
@@ -331,19 +361,22 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build();
}
- private IndicesClusterStateService createIndicesClusterStateService(final Supplier<MockIndicesService> indicesServiceSupplier) {
+ private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNode discoveryNode,
+ final Supplier<MockIndicesService> indicesServiceSupplier) {
final ThreadPool threadPool = mock(ThreadPool.class);
final Executor executor = mock(Executor.class);
when(threadPool.generic()).thenReturn(executor);
final MockIndicesService indicesService = indicesServiceSupplier.get();
- final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool);
+ final Settings settings = Settings.builder().put("node.name", discoveryNode.getName()).build();
+ final TransportService transportService = new TransportService(settings, null, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
final ClusterService clusterService = mock(ClusterService.class);
- final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService,
+ final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService,
transportService, null);
- final RecoveryTargetService recoveryTargetService = new RecoveryTargetService(Settings.EMPTY, threadPool,
+ final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(settings, threadPool,
transportService, null, clusterService);
final ShardStateAction shardStateAction = mock(ShardStateAction.class);
- return new IndicesClusterStateService(Settings.EMPTY, indicesService, clusterService,
+ return new IndicesClusterStateService(settings, indicesService, clusterService,
threadPool, recoveryTargetService, shardStateAction, null, repositoriesService, null, null, null, null, null);
}
diff --git a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
index 7d950a7383..d974ea348c 100644
--- a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
@@ -62,7 +62,7 @@ public class FlushIT extends ESIntegTestCase {
final CountDownLatch latch = new CountDownLatch(10);
final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>();
for (int j = 0; j < 10; j++) {
- client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute(new ActionListener<FlushResponse>() {
+ client().admin().indices().prepareFlush("test").execute(new ActionListener<FlushResponse>() {
@Override
public void onResponse(FlushResponse flushResponse) {
try {
@@ -128,7 +128,7 @@ public class FlushIT extends ESIntegTestCase {
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName)).get();
client().admin().cluster().prepareHealth()
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
for (ShardStats shardStats : indexStats.getShards()) {
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
index 361bca6076..bf755557a1 100644
--- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
@@ -48,7 +48,6 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
-import org.elasticsearch.test.junit.annotations.TestLogging;
import org.junit.After;
import org.junit.Before;
@@ -468,11 +467,11 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
for (BulkItemResponse bulkItemResponse : response) {
Throwable cause = ExceptionsHelper.unwrapCause(bulkItemResponse.getFailure().getCause());
assertThat(cause, instanceOf(CircuitBreakingException.class));
- assertEquals(((CircuitBreakingException) cause).getByteLimit(), inFlightRequestsLimit.bytes());
+ assertEquals(((CircuitBreakingException) cause).getByteLimit(), inFlightRequestsLimit.getBytes());
}
}
} catch (CircuitBreakingException ex) {
- assertEquals(ex.getByteLimit(), inFlightRequestsLimit.bytes());
+ assertEquals(ex.getByteLimit(), inFlightRequestsLimit.getBytes());
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java
index 033b3bb75c..1c8eaf71f8 100644
--- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java
@@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.notNullValue;
*/
public class CircuitBreakerUnitTests extends ESTestCase {
public static long pctBytes(String percentString) {
- return Settings.EMPTY.getAsMemory("", percentString).bytes();
+ return Settings.EMPTY.getAsMemory("", percentString).getBytes();
}
public void testBreakerSettingsValidationWithValidSettings() {
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
index ad4ea6567c..95067992ee 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
@@ -28,10 +28,13 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.test.junit.annotations.TestLogging;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
import static org.hamcrest.Matchers.equalTo;
@@ -41,7 +44,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
private static final int RELOCATION_COUNT = 25;
- @TestLogging("_root:DEBUG,action.delete:TRACE,action.index:TRACE,index.shard:TRACE,cluster.service:TRACE")
+ @TestLogging("_root:DEBUG,org.elasticsearch.action.delete:TRACE,org.elasticsearch.action.index:TRACE,index.shard:TRACE,org.elasticsearch.cluster.service:TRACE")
public void testPrimaryRelocationWhileIndexing() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3));
client().admin().indices().prepareCreate("test")
@@ -49,7 +52,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
.addMapping("type", "field", "type=text")
.get();
ensureGreen("test");
-
+ AtomicInteger numAutoGenDocs = new AtomicInteger();
final AtomicBoolean finished = new AtomicBoolean(false);
Thread indexingThread = new Thread() {
@Override
@@ -59,6 +62,8 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
DeleteResponse deleteResponse = client().prepareDelete("test", "type", "id").get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
+ client().prepareIndex("test", "type").setSource("auto", true).get();
+ numAutoGenDocs.incrementAndGet();
}
}
};
@@ -76,7 +81,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
client().admin().cluster().prepareReroute()
.add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()))
.execute().actionGet();
- ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
logger.info("--> [iteration {}] relocation complete", i);
relocationSource = relocationTarget;
@@ -87,5 +92,9 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
}
finished.set(true);
indexingThread.join();
+ refresh("test");
+ ElasticsearchAssertions.assertHitCount(client().prepareSearch("test").get(), numAutoGenDocs.get());
+ ElasticsearchAssertions.assertHitCount(client().prepareSearch("test")// extra paranoia ;)
+ .setQuery(QueryBuilders.termQuery("auto", true)).get(), numAutoGenDocs.get());
}
}
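The relocation test now also indexes auto-id documents from its background thread and verifies the final hit count, so a write lost during primary relocation surfaces as a count mismatch. The counting pattern, reduced to its shape (indexOneDoc() is a hypothetical stand-in for client().prepareIndex(...).get(); run inside a test method declared throws Exception):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicInteger;

    // Count every successfully indexed document; once the thread is joined,
    // a search returning exactly numAutoGenDocs.get() hits proves nothing was lost.
    AtomicInteger numAutoGenDocs = new AtomicInteger();
    AtomicBoolean finished = new AtomicBoolean(false);
    Thread indexingThread = new Thread(() -> {
        while (finished.get() == false) {
            indexOneDoc();                   // hypothetical helper
            numAutoGenDocs.incrementAndGet();
        }
    });
    indexingThread.start();
    // ... move the primary back and forth between nodes ...
    finished.set(true);
    indexingThread.join();
    // then: refresh("test") and assert hit count == numAutoGenDocs.get()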
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index 16e7045ceb..01eb2a6e52 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.recovery;
+import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
@@ -32,6 +33,10 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
@@ -42,8 +47,8 @@ import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.recovery.RecoveryState.Stage;
-import org.elasticsearch.indices.recovery.RecoveryState.Type;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -99,10 +104,11 @@ public class IndexRecoveryIT extends ESIntegTestCase {
return Arrays.asList(MockTransportService.TestPlugin.class, MockFSIndexStore.TestPlugin.class);
}
- private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, Type type,
- String sourceNode, String targetNode, boolean hasRestoreSource) {
+ private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, RecoverySource recoverySource, boolean primary,
+ String sourceNode, String targetNode) {
assertThat(state.getShardId().getId(), equalTo(shardId));
- assertThat(state.getType(), equalTo(type));
+ assertThat(state.getRecoverySource(), equalTo(recoverySource));
+ assertThat(state.getPrimary(), equalTo(primary));
if (sourceNode == null) {
assertNull(state.getSourceNode());
} else {
@@ -115,28 +121,22 @@ public class IndexRecoveryIT extends ESIntegTestCase {
assertNotNull(state.getTargetNode());
assertThat(state.getTargetNode().getName(), equalTo(targetNode));
}
- if (hasRestoreSource) {
- assertNotNull(state.getRestoreSource());
- } else {
- assertNull(state.getRestoreSource());
- }
-
}
- private void assertRecoveryState(RecoveryState state, int shardId, Type type, Stage stage,
- String sourceNode, String targetNode, boolean hasRestoreSource) {
- assertRecoveryStateWithoutStage(state, shardId, type, sourceNode, targetNode, hasRestoreSource);
+ private void assertRecoveryState(RecoveryState state, int shardId, RecoverySource type, boolean primary, Stage stage,
+ String sourceNode, String targetNode) {
+ assertRecoveryStateWithoutStage(state, shardId, type, primary, sourceNode, targetNode);
assertThat(state.getStage(), equalTo(stage));
}
- private void assertOnGoingRecoveryState(RecoveryState state, int shardId, Type type,
- String sourceNode, String targetNode, boolean hasRestoreSource) {
- assertRecoveryStateWithoutStage(state, shardId, type, sourceNode, targetNode, hasRestoreSource);
+ private void assertOnGoingRecoveryState(RecoveryState state, int shardId, RecoverySource type, boolean primary,
+ String sourceNode, String targetNode) {
+ assertRecoveryStateWithoutStage(state, shardId, type, primary, sourceNode, targetNode);
assertThat(state.getStage(), not(equalTo(Stage.DONE)));
}
private void slowDownRecovery(ByteSizeValue shardSize) {
- long chunkSize = Math.max(1, shardSize.bytes() / 10);
+ long chunkSize = Math.max(1, shardSize.getBytes() / 10);
for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) {
setChunkSize(settings, new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES));
}
@@ -179,7 +179,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
RecoveryState recoveryState = recoveryStates.get(0);
- assertRecoveryState(recoveryState, 0, Type.STORE, Stage.DONE, node, node, false);
+ assertRecoveryState(recoveryState, 0, StoreRecoverySource.EXISTING_STORE_INSTANCE, true, Stage.DONE, null, node);
validateIndexRecoveryState(recoveryState.getIndex());
}
@@ -232,12 +232,12 @@ public class IndexRecoveryIT extends ESIntegTestCase {
// validate node A recovery
RecoveryState nodeARecoveryState = nodeAResponses.get(0);
- assertRecoveryState(nodeARecoveryState, 0, Type.STORE, Stage.DONE, nodeA, nodeA, false);
+ assertRecoveryState(nodeARecoveryState, 0, StoreRecoverySource.EMPTY_STORE_INSTANCE, true, Stage.DONE, null, nodeA);
validateIndexRecoveryState(nodeARecoveryState.getIndex());
// validate node B recovery
RecoveryState nodeBRecoveryState = nodeBResponses.get(0);
- assertRecoveryState(nodeBRecoveryState, 0, Type.REPLICA, Stage.DONE, nodeA, nodeB, false);
+ assertRecoveryState(nodeBRecoveryState, 0, PeerRecoverySource.INSTANCE, false, Stage.DONE, nodeA, nodeB);
validateIndexRecoveryState(nodeBRecoveryState.getIndex());
}
@@ -285,10 +285,10 @@ public class IndexRecoveryIT extends ESIntegTestCase {
List<RecoveryState> nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
assertThat(nodeBRecoveryStates.size(), equalTo(1));
- assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.STORE, Stage.DONE, nodeA, nodeA, false);
+ assertRecoveryState(nodeARecoveryStates.get(0), 0, StoreRecoverySource.EMPTY_STORE_INSTANCE, true, Stage.DONE, null, nodeA);
validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
- assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, nodeA, nodeB, false);
+ assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, nodeA, nodeB);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
logger.info("--> request node recovery stats");
@@ -341,7 +341,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
assertThat(recoveryStates.size(), equalTo(1));
- assertRecoveryState(recoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false);
+ assertRecoveryState(recoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
validateIndexRecoveryState(recoveryStates.get(0).getIndex());
statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
@@ -399,14 +399,14 @@ public class IndexRecoveryIT extends ESIntegTestCase {
List<RecoveryState> nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
assertThat(nodeCRecoveryStates.size(), equalTo(1));
- assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeA, false);
+ assertRecoveryState(nodeARecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, Stage.DONE, nodeB, nodeA);
validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
- assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false);
+ assertRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
// relocations of replicas are peer recoveries (primary == false) whose source node is the node holding the primary (B)
- assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, Type.REPLICA, nodeB, nodeC, false);
+ assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, nodeB, nodeC);
validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
if (randomBoolean()) {
@@ -424,10 +424,10 @@ public class IndexRecoveryIT extends ESIntegTestCase {
nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
assertThat(nodeCRecoveryStates.size(), equalTo(1));
- assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false);
+ assertRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
- assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, Type.REPLICA, nodeB, nodeC, false);
+ assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, nodeB, nodeC);
validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
}
@@ -445,11 +445,11 @@ public class IndexRecoveryIT extends ESIntegTestCase {
nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
assertThat(nodeCRecoveryStates.size(), equalTo(1));
- assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false);
+ assertRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
// relocations of replicas are peer recoveries (primary == false) whose source node is the node holding the primary (B)
- assertRecoveryState(nodeCRecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeC, false);
+ assertRecoveryState(nodeCRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, Stage.DONE, nodeB, nodeC);
validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
}
@@ -498,7 +498,10 @@ public class IndexRecoveryIT extends ESIntegTestCase {
assertThat(recoveryStates.size(), equalTo(totalShards));
for (RecoveryState recoveryState : recoveryStates) {
- assertRecoveryState(recoveryState, 0, Type.SNAPSHOT, Stage.DONE, null, nodeA, true);
+ SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(
+ new Snapshot(REPO_NAME, createSnapshotResponse.getSnapshotInfo().snapshotId()),
+ Version.CURRENT, INDEX_NAME);
+ assertRecoveryState(recoveryState, 0, recoverySource, true, Stage.DONE, null, nodeA);
validateIndexRecoveryState(recoveryState.getIndex());
}
}
@@ -577,7 +580,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
List<IndexRequestBuilder> requests = new ArrayList<>();
int numDocs = scaledRandomIntBetween(25, 250);
for (int i = 0; i < numDocs; i++) {
- requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}"));
+ requests.add(client().prepareIndex(indexName, "type").setSource("{}"));
}
indexRandom(true, requests);
ensureSearchable(indexName);
@@ -591,13 +594,13 @@ public class IndexRecoveryIT extends ESIntegTestCase {
assertHitCount(searchResponse, numDocs);
String[] recoveryActions = new String[]{
- RecoverySource.Actions.START_RECOVERY,
- RecoveryTargetService.Actions.FILES_INFO,
- RecoveryTargetService.Actions.FILE_CHUNK,
- RecoveryTargetService.Actions.CLEAN_FILES,
+ PeerRecoverySourceService.Actions.START_RECOVERY,
+ PeerRecoveryTargetService.Actions.FILES_INFO,
+ PeerRecoveryTargetService.Actions.FILE_CHUNK,
+ PeerRecoveryTargetService.Actions.CLEAN_FILES,
//RecoveryTarget.Actions.TRANSLOG_OPS, <-- may not be sent if already flushed
- RecoveryTargetService.Actions.PREPARE_TRANSLOG,
- RecoveryTargetService.Actions.FINALIZE
+ PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG,
+ PeerRecoveryTargetService.Actions.FINALIZE
};
final String recoveryActionToBlock = randomFrom(recoveryActions);
final boolean dropRequests = randomBoolean();
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
index e0bb251f47..0678ebef99 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -35,6 +35,7 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lucene.store.IndexOutputOutputStream;
@@ -83,7 +84,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
- null, RecoveryState.Type.STORE, randomLong());
+ null, randomBoolean(), randomLong());
Store store = newStore(createTempDir());
RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, () -> 0L, e -> () -> {},
recoverySettings.getChunkSize().bytesAsInt(), logger);
@@ -135,7 +136,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
- null, RecoveryState.Type.STORE, randomLong());
+ null, randomBoolean(), randomLong());
Path tempDir = createTempDir();
Store store = newStore(tempDir, false);
AtomicBoolean failedEngine = new AtomicBoolean(false);
@@ -199,7 +200,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
- null, RecoveryState.Type.STORE, randomLong());
+ null, randomBoolean(), randomLong());
Path tempDir = createTempDir();
Store store = newStore(tempDir, false);
AtomicBoolean failedEngine = new AtomicBoolean(false);
@@ -258,7 +259,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
- null, RecoveryState.Type.REPLICA, randomLong());
+ null, false, randomLong());
IndexShard shard = mock(IndexShard.class);
Translog.View translogView = mock(Translog.View.class);
when(shard.acquireTranslogView()).thenReturn(translogView);
@@ -288,7 +289,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
- null, RecoveryState.Type.PRIMARY_RELOCATION, randomLong());
+ null, true, randomLong());
AtomicBoolean phase1Called = new AtomicBoolean();
AtomicBoolean phase2Called = new AtomicBoolean();
AtomicBoolean ensureClusterStateVersionCalled = new AtomicBoolean();
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java
index 587dc35bc5..55359a935f 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java
@@ -43,7 +43,7 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase {
IndexShard indexShard = service.getShardOrNull(0);
DiscoveryNode node = new DiscoveryNode("foo", new LocalTransportAddress("bar"), emptyMap(), emptySet(), Version.CURRENT);
- RecoveryTarget status = new RecoveryTarget(indexShard, node, new RecoveryTargetService.RecoveryListener() {
+ RecoveryTarget status = new RecoveryTarget(indexShard, node, new PeerRecoveryTargetService.RecoveryListener() {
@Override
public void onRecoveryDone(RecoveryState state) {
}
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java
index d0401196b9..6d37ae7d0d 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java
@@ -20,6 +20,10 @@ package org.elasticsearch.indices.recovery;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -31,7 +35,6 @@ import org.elasticsearch.indices.recovery.RecoveryState.Index;
import org.elasticsearch.indices.recovery.RecoveryState.Stage;
import org.elasticsearch.indices.recovery.RecoveryState.Timer;
import org.elasticsearch.indices.recovery.RecoveryState.Translog;
-import org.elasticsearch.indices.recovery.RecoveryState.Type;
import org.elasticsearch.indices.recovery.RecoveryState.VerifyIndex;
import org.elasticsearch.test.ESTestCase;
@@ -351,8 +354,10 @@ public class RecoveryTargetTests extends ESTestCase {
stages[i] = stages[j];
stages[j] = t;
try {
- RecoveryState state = new RecoveryState(
- new ShardId("bla", "_na_", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode, discoveryNode);
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("bla", "_na_", 0), discoveryNode.getId(),
+ randomBoolean(), ShardRoutingState.INITIALIZING);
+ RecoveryState state = new RecoveryState(shardRouting, discoveryNode,
+ shardRouting.recoverySource().getType() == RecoverySource.Type.PEER ? discoveryNode : null);
for (Stage stage : stages) {
state.setStage(stage);
}
@@ -366,8 +371,10 @@ public class RecoveryTargetTests extends ESTestCase {
i = randomIntBetween(1, stages.length - 1);
ArrayList<Stage> list = new ArrayList<>(Arrays.asList(Arrays.copyOfRange(stages, 0, i)));
list.addAll(Arrays.asList(stages));
- RecoveryState state = new RecoveryState(new ShardId("bla", "_na_", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode,
- discoveryNode);
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("bla", "_na_", 0), discoveryNode.getId(),
+ randomBoolean(), ShardRoutingState.INITIALIZING);
+ RecoveryState state = new RecoveryState(shardRouting, discoveryNode,
+ shardRouting.recoverySource().getType() == RecoverySource.Type.PEER ? discoveryNode : null);
for (Stage stage : list) {
state.setStage(stage);
}
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java
index 14d6ff8231..7065ffa5df 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java
@@ -46,7 +46,7 @@ public class StartRecoveryRequestTests extends ESTestCase {
new DiscoveryNode("a", new LocalTransportAddress("1"), emptyMap(), emptySet(), targetNodeVersion),
new DiscoveryNode("b", new LocalTransportAddress("1"), emptyMap(), emptySet(), targetNodeVersion),
Store.MetadataSnapshot.EMPTY,
- RecoveryState.Type.PRIMARY_RELOCATION,
+ randomBoolean(),
1L
);
ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
@@ -65,7 +65,7 @@ public class StartRecoveryRequestTests extends ESTestCase {
assertThat(outRequest.targetNode(), equalTo(inRequest.targetNode()));
assertThat(outRequest.metadataSnapshot().asMap(), equalTo(inRequest.metadataSnapshot().asMap()));
assertThat(outRequest.recoveryId(), equalTo(inRequest.recoveryId()));
- assertThat(outRequest.recoveryType(), equalTo(inRequest.recoveryType()));
+ assertThat(outRequest.isPrimaryRelocation(), equalTo(inRequest.isPrimaryRelocation()));
}
}
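StartRecoveryRequest now carries a boolean isPrimaryRelocation where it used to carry RecoveryState.Type, and the test round-trips the request through the wire format before comparing every field. The same round-trip technique in plain java.io, as a generic illustration (WireProbe is hypothetical; the real test uses Elasticsearch's StreamInput/StreamOutput wrappers):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class WireProbe {
        // Serialize, deserialize, then compare: a field missed by the write or
        // read path shows up as a mismatch on the reconstructed copy.
        static void roundTrip(boolean isPrimaryRelocation, long recoveryId) throws IOException {
            ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(outBuffer)) {
                out.writeBoolean(isPrimaryRelocation);
                out.writeLong(recoveryId);
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(outBuffer.toByteArray()))) {
                if (in.readBoolean() != isPrimaryRelocation || in.readLong() != recoveryId) {
                    throw new AssertionError("round trip lost a field");
                }
            }
        }
    }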
diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java
index 03dade372c..9b759bff56 100644
--- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java
@@ -86,7 +86,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
allowNodes("test", 3);
logger.info("Running Cluster Health");
- clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes(">=3").execute().actionGet();
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
@@ -104,7 +104,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase {
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)).get());
logger.info("Running Cluster Health");
- clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes(">=3").execute().actionGet();
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java
index 4d82acf87f..cd562e0fde 100644
--- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java
@@ -19,16 +19,21 @@
package org.elasticsearch.indices.settings;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.filter.RegexFilter;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexModule;
@@ -110,7 +115,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
IndexService indexService = service.indexService(resolveIndex("test"));
if (indexService != null) {
assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1);
- assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024);
+ assertEquals(indexService.getIndexSettings().getFlushThresholdSize().getBytes(), 1024);
}
}
client().admin().indices().prepareUpdateSettings("test")
@@ -124,7 +129,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
IndexService indexService = service.indexService(resolveIndex("test"));
if (indexService != null) {
assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000);
- assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024);
+ assertEquals(indexService.getIndexSettings().getFlushThresholdSize().getBytes(), 1024);
}
}
}
@@ -348,13 +353,17 @@ public class UpdateSettingsIT extends ESIntegTestCase {
logger.info("test: test done");
}
- private static class MockAppender extends AppenderSkeleton {
+ private static class MockAppender extends AbstractAppender {
public boolean sawUpdateMaxThreadCount;
public boolean sawUpdateAutoThrottle;
+ public MockAppender(final String name) throws IllegalAccessException {
+ super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
+ }
+
@Override
- protected void append(LoggingEvent event) {
- String message = event.getMessage().toString();
+ public void append(LogEvent event) {
+ String message = event.getMessage().getFormattedMessage();
if (event.getLevel() == Level.TRACE &&
event.getLoggerName().endsWith("lucene.iw")) {
}
@@ -366,22 +375,14 @@ public class UpdateSettingsIT extends ESIntegTestCase {
}
}
- @Override
- public boolean requiresLayout() {
- return false;
- }
-
- @Override
- public void close() {
- }
}
- public void testUpdateAutoThrottleSettings() {
- MockAppender mockAppender = new MockAppender();
- Logger rootLogger = Logger.getRootLogger();
+ public void testUpdateAutoThrottleSettings() throws IllegalAccessException {
+ MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
+ Logger rootLogger = LogManager.getRootLogger();
Level savedLevel = rootLogger.getLevel();
- rootLogger.addAppender(mockAppender);
- rootLogger.setLevel(Level.TRACE);
+ Loggers.addAppender(rootLogger, mockAppender);
+ Loggers.setLevel(rootLogger, Level.TRACE);
try {
// No throttling at first, only 1 non-replicated shard, force lots of merging:
@@ -412,18 +413,65 @@ public class UpdateSettingsIT extends ESIntegTestCase {
GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
assertThat(getSettingsResponse.getSetting("test", MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey()), equalTo("false"));
} finally {
- rootLogger.removeAppender(mockAppender);
- rootLogger.setLevel(savedLevel);
+ Loggers.removeAppender(rootLogger, mockAppender);
+ Loggers.setLevel(rootLogger, savedLevel);
+ }
+ }
+
+ public void testInvalidMergeMaxThreadCount() throws IllegalAccessException {
+ CreateIndexRequestBuilder createBuilder = prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
+ .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
+ .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "100")
+ .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10")
+ );
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
+ () -> createBuilder.get());
+ assertThat(exc.getMessage(), equalTo("maxThreadCount (= 100) should be <= maxMergeCount (= 10)"));
+
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
+ .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
+ .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "100")
+ .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "100")
+ ));
+
+ {
+ UpdateSettingsRequestBuilder updateBuilder = client().admin().indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder()
+ .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1000")
+ );
+ exc = expectThrows(IllegalArgumentException.class,
+ () -> updateBuilder.get());
+ assertThat(exc.getMessage(), equalTo("maxThreadCount (= 1000) should be <= maxMergeCount (= 100)"));
+ }
+
+ {
+ UpdateSettingsRequestBuilder updateBuilder = client().admin().indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder()
+ .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10")
+ );
+ exc = expectThrows(IllegalArgumentException.class,
+ () -> updateBuilder.get());
+ assertThat(exc.getMessage(), equalTo("maxThreadCount (= 100) should be <= maxMergeCount (= 10)"));
}
}
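The new testInvalidMergeMaxThreadCount leans on the expectThrows helper to capture the rejection both at index creation and on a live settings update, then asserts on its message. A self-contained sketch of that idiom (a simplified stand-in, not the ESTestCase implementation):

    final class ExpectThrowsSketch {
        interface ThrowingRunnable { void run() throws Throwable; }

        // Runs the code, asserts it throws the expected type, and hands the exception
        // back so the caller can assert on its message.
        static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
            try {
                runnable.run();
            } catch (Throwable t) {
                if (expectedType.isInstance(t)) {
                    return expectedType.cast(t);
                }
                throw new AssertionError("unexpected exception type: " + t.getClass(), t);
            }
            throw new AssertionError("expected " + expectedType.getName() + " to be thrown");
        }

        public static void main(String[] args) {
            IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
                throw new IllegalArgumentException("maxThreadCount (= 100) should be <= maxMergeCount (= 10)");
            });
            assert e.getMessage().startsWith("maxThreadCount");
        }
    }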
// #6882: make sure we can change index.merge.scheduler.max_thread_count live
- public void testUpdateMergeMaxThreadCount() {
- MockAppender mockAppender = new MockAppender();
- Logger rootLogger = Logger.getRootLogger();
+ public void testUpdateMergeMaxThreadCount() throws IllegalAccessException {
+ MockAppender mockAppender = new MockAppender("testUpdateMergeMaxThreadCount");
+ Logger rootLogger = LogManager.getRootLogger();
Level savedLevel = rootLogger.getLevel();
- rootLogger.addAppender(mockAppender);
- rootLogger.setLevel(Level.TRACE);
+ Loggers.addAppender(rootLogger, mockAppender);
+ Loggers.setLevel(rootLogger, Level.TRACE);
try {
@@ -456,8 +504,8 @@ public class UpdateSettingsIT extends ESIntegTestCase {
assertThat(getSettingsResponse.getSetting("test", MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey()), equalTo("1"));
} finally {
- rootLogger.removeAppender(mockAppender);
- rootLogger.setLevel(savedLevel);
+ Loggers.removeAppender(rootLogger, mockAppender);
+ Loggers.setLevel(rootLogger, savedLevel);
}
}
diff --git a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java
index 8eef10d693..4f97264af9 100644
--- a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java
@@ -348,7 +348,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
}
indexRandom(true, builder);
if (randomBoolean()) {
- client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).setForce(true).execute().get();
+ client().admin().indices().prepareFlush("test").setForce(true).execute().get();
}
client().admin().indices().prepareClose("test").execute().get();
@@ -413,4 +413,4 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java
index 67fe440a29..2ca8947cbf 100644
--- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java
@@ -37,7 +37,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -58,6 +57,7 @@ import org.elasticsearch.test.junit.annotations.TestLogging;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -96,7 +96,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
ClusterState current = clusterService().state();
GatewayAllocator allocator = internalCluster().getInstance(GatewayAllocator.class);
- AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[0]);
+ AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Collections.emptyList());
RoutingNodes routingNodes = new RoutingNodes(
ClusterState.builder(current)
.routingTable(RoutingTable.builder(current.routingTable()).remove("a").addAsRecovery(current.metaData().index("a")).build())
@@ -139,8 +139,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
routingTable.addAsRecovery(updatedState.metaData().index(index));
updatedState = ClusterState.builder(updatedState).routingTable(routingTable.build()).build();
- RoutingAllocation.Result result = allocationService.reroute(updatedState, "reroute");
- return ClusterState.builder(updatedState).routingResult(result).build();
+ return allocationService.reroute(updatedState, "reroute");
}
@@ -158,8 +157,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
builder.nodes(DiscoveryNodes.builder(currentState.nodes()).remove("_non_existent"));
currentState = builder.build();
- RoutingAllocation.Result result = allocationService.deassociateDeadNodes(currentState, true, "reroute");
- return ClusterState.builder(currentState).routingResult(result).build();
+ return allocationService.deassociateDeadNodes(currentState, true, "reroute");
}
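Both RareClusterStateIT hunks reflect the same API change: AllocationService.reroute and deassociateDeadNodes now return the updated ClusterState directly, so callers no longer fold a RoutingAllocation.Result back into a builder themselves. A hedged before/after sketch of the call site:

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;

    final class RerouteCallSite {
        // Old shape (removed above):
        //   RoutingAllocation.Result result = allocationService.reroute(state, "reroute");
        //   return ClusterState.builder(state).routingResult(result).build();
        // New shape: the service returns the rebuilt state itself.
        static ClusterState reroute(AllocationService allocationService, ClusterState state) {
            return allocationService.reroute(state, "reroute");
        }
    }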
diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java
index 66687ea74f..0515887a55 100644
--- a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.state;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
@@ -28,7 +29,6 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
@@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.nullValue;
*/
@ESIntegTestCase.ClusterScope(minNumDataNodes = 2)
public class SimpleIndexStateIT extends ESIntegTestCase {
- private final ESLogger logger = Loggers.getLogger(SimpleIndexStateIT.class);
+ private final Logger logger = Loggers.getLogger(SimpleIndexStateIT.class);
public void testSimpleOpenClose() {
logger.info("--> creating test index");
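The ESLogger class disappears with the logging overhaul; Loggers.getLogger now returns the Log4j 2 Logger interface, so only the field's declared type changes. A minimal sketch:

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;

    final class LoggerField {
        // Same factory call as before; the returned type is now the Log4j 2 interface.
        private final Logger logger = Loggers.getLogger(LoggerField.class);
    }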
diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
index aee3dd227e..b95d872a61 100644
--- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
@@ -647,7 +647,7 @@ public class IndexStatsIT extends ESIntegTestCase {
flags.writeTo(out);
out.close();
BytesReference bytes = out.bytes();
- CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(bytes.streamInput());
+ CommonStatsFlags readStats = new CommonStatsFlags(bytes.streamInput());
for (Flag flag : values) {
assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
}
@@ -661,7 +661,7 @@ public class IndexStatsIT extends ESIntegTestCase {
flags.writeTo(out);
out.close();
BytesReference bytes = out.bytes();
- CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(bytes.streamInput());
+ CommonStatsFlags readStats = new CommonStatsFlags(bytes.streamInput());
for (Flag flag : values) {
assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
}
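The IndexStatsIT hunks swap the Streamable-style static factory readCommonStatsFlags(StreamInput) for a Writeable-style constructor that reads from the stream. A hedged, self-contained sketch of the round trip those assertions exercise, with a hypothetical Flags class standing in for CommonStatsFlags:

    import java.io.*;

    final class Flags {
        private final boolean[] set;

        Flags(boolean... set) { this.set = set; }

        // Reading constructor replaces the old static read...() factory method.
        Flags(DataInput in) throws IOException {
            set = new boolean[in.readInt()];
            for (int i = 0; i < set.length; i++) set[i] = in.readBoolean();
        }

        void writeTo(DataOutput out) throws IOException {
            out.writeInt(set.length);
            for (boolean b : set) out.writeBoolean(b);
        }

        boolean isSet(int flag) { return set[flag]; }

        public static void main(String[] args) throws IOException {
            Flags flags = new Flags(true, false, true);
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            flags.writeTo(new DataOutputStream(bytes));
            // Rehydrate from the same bytes, then compare flag-for-flag.
            Flags read = new Flags(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            for (int i = 0; i < 3; i++) assert flags.isSet(i) == read.isSet(i);
        }
    }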
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
index a1818a15d1..b248fc811f 100644
--- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.indices.store;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cluster.ClusterState;
@@ -37,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
@@ -45,7 +45,7 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.indices.recovery.RecoverySource;
+import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -125,7 +125,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
logger.info("--> running cluster_health");
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
.setWaitForNodes("4")
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
@@ -158,7 +158,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_3)).get();
}
clusterHealth = client().admin().cluster().prepareHealth()
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
@@ -215,7 +215,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
shardActiveRequestSent.await();
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
logClusterState();
@@ -255,7 +255,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
logger.info("--> running cluster_health");
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
.setWaitForNodes("3")
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
@@ -270,7 +270,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
clusterHealth = client().admin().cluster().prepareHealth()
.setWaitForGreenStatus()
.setWaitForNodes("2")
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
@@ -313,7 +313,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name", node4)
));
- assertFalse(client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).setWaitForGreenStatus().setWaitForNodes("5").get().isTimedOut());
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).setWaitForGreenStatus().setWaitForNodes("5").get().isTimedOut());
// disable allocation to control the situation more easily
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
@@ -474,11 +474,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
* the ShardActiveRequest.
*/
public static class ReclocationStartEndTracer extends MockTransportService.Tracer {
- private final ESLogger logger;
+ private final Logger logger;
private final CountDownLatch beginRelocationLatch;
private final CountDownLatch receivedShardExistsRequestLatch;
- public ReclocationStartEndTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) {
+ public ReclocationStartEndTracer(Logger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) {
this.logger = logger;
this.beginRelocationLatch = beginRelocationLatch;
this.receivedShardExistsRequestLatch = receivedShardExistsRequestLatch;
@@ -494,7 +494,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
@Override
public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
- if (action.equals(RecoverySource.Actions.START_RECOVERY)) {
+ if (action.equals(PeerRecoverySourceService.Actions.START_RECOVERY)) {
logger.info("sent: {}, relocation starts", action);
beginRelocationLatch.countDown();
}
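Two renames run through IndicesStoreIntegrationIT: setWaitForRelocatingShards(0) becomes the boolean setWaitForNoRelocatingShards(true) on the cluster-health builder, and the recovery action constants move from RecoverySource to PeerRecoverySourceService. A sketch of the updated health wait, assuming the builder methods used in these hunks:

    import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
    import org.elasticsearch.client.Client;

    final class HealthWait {
        static void awaitStable(Client client) {
            // Same wait condition as setWaitForRelocatingShards(0), renamed for clarity.
            ClusterHealthResponse health = client.admin().cluster().prepareHealth()
                    .setWaitForNoRelocatingShards(true)
                    .setWaitForGreenStatus()
                    .get();
            assert health.isTimedOut() == false;
        }
    }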
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
index d2e603ffd4..62b5bc30a6 100644
--- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
@@ -88,7 +88,7 @@ public class IndicesStoreTests extends ESTestCase {
public void before() {
localNode = new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), emptySet(), Version.CURRENT);
clusterService = createClusterService(threadPool);
- indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(clusterService.getSettings(), null, null), null);
+ indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(clusterService.getSettings(), null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), null);
}
@After
@@ -129,7 +129,7 @@ public class IndicesStoreTests extends ESTestCase {
if (state == ShardRoutingState.UNASSIGNED) {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
- routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", null, null, j == 0, state, unassignedInfo));
+ routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", null, j == 0, state, unassignedInfo));
}
}
assertFalse(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
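IndicesStoreTests picks up two signature changes: TransportService now requires a TransportInterceptor (tests pass the shared no-op constant), and TestShardRouting.newShardRouting dropped a parameter. A sketch of the former, mirroring the constructor arguments in the hunk (the nulls stand in for the test's stubbed dependencies):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.transport.TransportService;

    final class NoopInterceptedTransport {
        static TransportService build(Settings settings) {
            // NOOP_TRANSPORT_INTERCEPTOR satisfies the new required parameter without
            // changing behaviour for tests that never intercept requests.
            return new TransportService(settings, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
        }
    }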
diff --git a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java
deleted file mode 100644
index 356538d62c..0000000000
--- a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringIT.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.indices.template;
-
-import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
-import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
-import org.elasticsearch.cluster.ClusterModule;
-import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
-import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
-import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
-import org.elasticsearch.test.ESIntegTestCase.Scope;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.core.IsNull.notNullValue;
-
-@ClusterScope(scope = Scope.SUITE)
-public class IndexTemplateFilteringIT extends ESIntegTestCase {
- @Override
- protected Collection<Class<? extends Plugin>> nodePlugins() {
- return Arrays.asList(TestPlugin.class);
- }
-
- public void testTemplateFiltering() throws Exception {
- client().admin().indices().preparePutTemplate("template1")
- .setTemplate("test*")
- .addMapping("type1", "field1", "type=text").get();
-
- client().admin().indices().preparePutTemplate("template2")
- .setTemplate("test*")
- .addMapping("type2", "field2", "type=text").get();
-
- client().admin().indices().preparePutTemplate("template3")
- .setTemplate("no_match")
- .addMapping("type3", "field3", "type=text").get();
-
- assertAcked(prepareCreate("test"));
-
- GetMappingsResponse response = client().admin().indices().prepareGetMappings("test").get();
- assertThat(response, notNullValue());
- ImmutableOpenMap<String, MappingMetaData> metadata = response.getMappings().get("test");
- assertThat(metadata.size(), is(1));
- assertThat(metadata.get("type2"), notNullValue());
- }
-
- public static class TestFilter implements IndexTemplateFilter {
- @Override
- public boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template) {
- //make sure that no_match template is filtered out before the custom filters as it doesn't match the index name
- return (template.name().equals("template2") || template.name().equals("no_match"));
- }
- }
-
- public static class TestPlugin extends Plugin {
- public void onModule(ClusterModule module) {
- module.registerIndexTemplateFilter(TestFilter.class);
- }
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
index 676f26e7d7..e40361e94f 100644
--- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
@@ -27,20 +27,20 @@ import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateReque
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
import org.elasticsearch.indices.InvalidAliasNameException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.test.ESIntegTestCase;
-import java.io.IOException;
+import org.junit.After;
+
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
@@ -68,6 +68,11 @@ import static org.hamcrest.Matchers.nullValue;
*/
public class SimpleIndexTemplateIT extends ESIntegTestCase {
+ @After
+ public void cleanupTemplates() {
+ client().admin().indices().prepareDeleteTemplate("*").get();
+ }
+
public void testSimpleIndexTemplateTests() throws Exception {
// clean all templates setup by the framework.
client().admin().indices().prepareDeleteTemplate("*").get();
@@ -113,7 +118,9 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
// index something into test_index, will match on both templates
- client().prepareIndex("test_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get();
+ client().prepareIndex("test_index", "type1", "1")
+ .setSource("field1", "value1", "field2", "value 2")
+ .setRefreshPolicy(IMMEDIATE).get();
ensureGreen();
SearchResponse searchResponse = client().prepareSearch("test_index")
@@ -126,7 +133,9 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
// field2 is not stored.
assertThat(searchResponse.getHits().getAt(0).field("field2"), nullValue());
- client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get();
+ client().prepareIndex("text_index", "type1", "1")
+ .setSource("field1", "value1", "field2", "value 2")
+ .setRefreshPolicy(IMMEDIATE).get();
ensureGreen();
// now only match on one template (template_1)
@@ -164,9 +173,12 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
logger.info("--> explicitly delete template_1");
admin().indices().prepareDeleteTemplate("template_1").execute().actionGet();
- assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(1 + existingTemplates));
- assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_2"), equalTo(true));
- assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_1"), equalTo(false));
+
+ ClusterState state = admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(state.metaData().templates().size(), equalTo(1 + existingTemplates));
+ assertThat(state.metaData().templates().containsKey("template_2"), equalTo(true));
+ assertThat(state.metaData().templates().containsKey("template_1"), equalTo(false));
logger.info("--> put template_1 back");
@@ -181,11 +193,13 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
logger.info("--> delete template*");
admin().indices().prepareDeleteTemplate("template*").execute().actionGet();
- assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(existingTemplates));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(),
+ equalTo(existingTemplates));
logger.info("--> delete * with no templates, make sure we don't get a failure");
admin().indices().prepareDeleteTemplate("*").execute().actionGet();
- assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(0));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(),
+ equalTo(0));
}
public void testThatGetIndexTemplatesWorks() throws Exception {
@@ -193,6 +207,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
client().admin().indices().preparePutTemplate("template_1")
.setTemplate("te*")
.setOrder(0)
+ .setVersion(123)
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("field1").field("type", "text").field("store", true).endObject()
.startObject("field2").field("type", "keyword").field("store", true).endObject()
@@ -205,9 +220,11 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
assertThat(getTemplate1Response.getIndexTemplates().get(0), is(notNullValue()));
assertThat(getTemplate1Response.getIndexTemplates().get(0).getTemplate(), is("te*"));
assertThat(getTemplate1Response.getIndexTemplates().get(0).getOrder(), is(0));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0).getVersion(), is(123));
logger.info("--> get non-existing-template");
- GetIndexTemplatesResponse getTemplate2Response = client().admin().indices().prepareGetTemplates("non-existing-template").execute().actionGet();
+ GetIndexTemplatesResponse getTemplate2Response =
+ client().admin().indices().prepareGetTemplates("non-existing-template").execute().actionGet();
assertThat(getTemplate2Response.getIndexTemplates(), hasSize(0));
}
@@ -348,7 +365,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
.filter(QueryBuilders.termsQuery("_type", "typeX", "typeY", "typeZ")))
.get();
- assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ"));
+ assertAcked(prepareCreate("test_index")
+ .addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ"));
ensureGreen();
client().prepareIndex("test_index", "type1", "1").setSource("field", "A value").get();
@@ -582,7 +600,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
.setOrder(0)
.addMapping("test", "field", "type=text")
.addAlias(new Alias("alias1").filter(termQuery("field", "value"))).get();
- // Indexing into b should succeed, because the field mapping for field 'field' is defined in the _default_ mapping and the test type exists.
+ // Indexing into b should succeed, because the field mapping for field 'field' is defined in the _default_ mapping and
+ // the test type exists.
client().admin().indices().preparePutTemplate("template2")
.setTemplate("b*")
.setOrder(0)
@@ -688,5 +707,21 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
}
+ public void testOrderAndVersion() {
+ int order = randomInt();
+ Integer version = randomBoolean() ? randomInt() : null;
+
+ assertAcked(client().admin().indices().preparePutTemplate("versioned_template")
+ .setTemplate("te*")
+ .setVersion(version)
+ .setOrder(order)
+ .addMapping("test", "field", "type=text")
+ .get());
+
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates("versioned_template").get();
+ assertThat(response.getIndexTemplates().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getVersion(), equalTo(version));
+ assertThat(response.getIndexTemplates().get(0).getOrder(), equalTo(order));
+ }
}
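The new testOrderAndVersion pins down the template metadata added in this file: setVersion(Integer) on the put request (null allowed) round-trips through getVersion() on the fetched template. A sketch of that round trip using the same client builders:

    import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
    import org.elasticsearch.client.Client;

    final class TemplateVersionRoundTrip {
        static void putAndVerify(Client client) {
            client.admin().indices().preparePutTemplate("versioned_template")
                    .setTemplate("te*")
                    .setVersion(123)      // optional; may also be null
                    .setOrder(0)
                    .addMapping("test", "field", "type=text")
                    .get();

            GetIndexTemplatesResponse response =
                    client.admin().indices().prepareGetTemplates("versioned_template").get();
            // getVersion() returns the Integer that was stored (or null when unset).
            assert Integer.valueOf(123).equals(response.getIndexTemplates().get(0).getVersion());
        }
    }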
diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java b/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java
index fa14695128..3906f82dc0 100644
--- a/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java
@@ -33,6 +33,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
@@ -239,6 +240,15 @@ public class IngestDocumentTests extends ESTestCase {
assertFalse(ingestDocument.hasField("list.10"));
}
+ public void testListHasFieldIndexOutOfBounds_fail() {
+ assertTrue(ingestDocument.hasField("list.0", true));
+ assertTrue(ingestDocument.hasField("list.1", true));
+ Exception e = expectThrows(IllegalArgumentException.class, () -> ingestDocument.hasField("list.2", true));
+ assertThat(e.getMessage(), equalTo("[2] is out of bounds for array with length [2] as part of path [list.2]"));
+ e = expectThrows(IllegalArgumentException.class, () -> ingestDocument.hasField("list.10", true));
+ assertThat(e.getMessage(), equalTo("[10] is out of bounds for array with length [2] as part of path [list.10]"));
+ }
+
public void testListHasFieldIndexNotNumeric() {
assertFalse(ingestDocument.hasField("list.test"));
}
@@ -995,34 +1005,4 @@ public class IngestDocumentTests extends ESTestCase {
}
}
- public static void assertIngestDocument(Object a, Object b) {
- if (a instanceof Map) {
- Map<?, ?> mapA = (Map<?, ?>) a;
- Map<?, ?> mapB = (Map<?, ?>) b;
- for (Map.Entry<?, ?> entry : mapA.entrySet()) {
- if (entry.getValue() instanceof List || entry.getValue() instanceof Map) {
- assertIngestDocument(entry.getValue(), mapB.get(entry.getKey()));
- }
- }
- } else if (a instanceof List) {
- List<?> listA = (List<?>) a;
- List<?> listB = (List<?>) b;
- for (int i = 0; i < listA.size(); i++) {
- Object value = listA.get(i);
- if (value instanceof List || value instanceof Map) {
- assertIngestDocument(value, listB.get(i));
- }
- }
- } else if (a instanceof byte[]) {
- assertArrayEquals((byte[]) a, (byte[])b);
- } else if (a instanceof IngestDocument) {
- IngestDocument docA = (IngestDocument) a;
- IngestDocument docB = (IngestDocument) b;
- assertIngestDocument(docA.getSourceAndMetadata(), docB.getSourceAndMetadata());
- assertIngestDocument(docA.getIngestMetadata(), docB.getIngestMetadata());
- } else {
- String msg = String.format(Locale.ROOT, "Expected %s class to be equal to %s", a.getClass().getName(), b.getClass().getName());
- assertThat(msg, a, equalTo(b));
- }
- }
}
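Two things happen in IngestDocumentTests: the local assertIngestDocument helper moves to the shared IngestDocumentMatcher (hence the new static import at the top of the file), and hasField gains a strict overload that throws on an out-of-bounds list index instead of returning false. A sketch of the overload's contract as the new test pins it down:

    import org.elasticsearch.ingest.IngestDocument;

    final class HasFieldContract {
        static void check(IngestDocument doc) {
            // Lenient form: false for "list.10" when the list has two elements.
            boolean present = doc.hasField("list.10");
            // Strict form: the same path now throws IllegalArgumentException
            // ("[10] is out of bounds for array with length [2] ...").
            try {
                doc.hasField("list.10", true);
            } catch (IllegalArgumentException expected) {
                // the out-of-bounds index surfaces as an error rather than a false
            }
            assert present == false;
        }
    }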
diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
index 08cde7e04d..3a842a4690 100644
--- a/core/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
@@ -39,7 +39,7 @@ public class IngestServiceTests extends ESTestCase {
public void testIngestPlugin() {
ThreadPool tp = Mockito.mock(ThreadPool.class);
- IngestService ingestService = new IngestService(Settings.EMPTY, tp, null, null, Collections.singletonList(DUMMY_PLUGIN));
+ IngestService ingestService = new IngestService(Settings.EMPTY, tp, null, null, null, Collections.singletonList(DUMMY_PLUGIN));
Map<String, Processor.Factory> factories = ingestService.getPipelineStore().getProcessorFactories();
assertTrue(factories.containsKey("foo"));
assertEquals(1, factories.size());
@@ -48,7 +48,7 @@ public class IngestServiceTests extends ESTestCase {
public void testIngestPluginDuplicate() {
ThreadPool tp = Mockito.mock(ThreadPool.class);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
- new IngestService(Settings.EMPTY, tp, null, null, Arrays.asList(DUMMY_PLUGIN, DUMMY_PLUGIN))
+ new IngestService(Settings.EMPTY, tp, null, null, null, Arrays.asList(DUMMY_PLUGIN, DUMMY_PLUGIN))
);
assertTrue(e.getMessage(), e.getMessage().contains("already registered"));
}
diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java
index 4cad7e5ab6..8b22e4f0bc 100644
--- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java
@@ -60,6 +60,7 @@ import static org.mockito.Mockito.when;
public class PipelineExecutionServiceTests extends ESTestCase {
+ private final Integer version = randomBoolean() ? randomInt() : null;
private PipelineStore store;
private PipelineExecutionService executionService;
@@ -89,7 +90,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
public void testExecuteBulkPipelineDoesNotExist() {
CompoundProcessor processor = mock(CompoundProcessor.class);
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", processor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor));
BulkRequest bulkRequest = new BulkRequest();
IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
@@ -122,7 +123,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
public void testExecuteSuccess() throws Exception {
CompoundProcessor processor = mock(CompoundProcessor.class);
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", processor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
@SuppressWarnings("unchecked")
@@ -136,7 +137,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
public void testExecuteEmptyPipeline() throws Exception {
CompoundProcessor processor = mock(CompoundProcessor.class);
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", processor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor));
when(processor.getProcessors()).thenReturn(Collections.emptyList());
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
@@ -165,7 +166,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
}
return null;
}).when(processor).execute(any());
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", processor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
@SuppressWarnings("unchecked")
@@ -189,7 +190,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
public void testExecuteFailure() throws Exception {
CompoundProcessor processor = mock(CompoundProcessor.class);
when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class)));
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", processor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
@SuppressWarnings("unchecked")
@@ -209,7 +210,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
Processor onFailureProcessor = mock(Processor.class);
CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor),
Collections.singletonList(new CompoundProcessor(onFailureProcessor)));
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", compoundProcessor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
@SuppressWarnings("unchecked")
@@ -226,7 +227,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
Processor onFailureProcessor = mock(Processor.class);
CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor),
Collections.singletonList(new CompoundProcessor(onFailureProcessor)));
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", compoundProcessor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
@@ -247,7 +248,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor),
Collections.singletonList(new CompoundProcessor(false, Collections.singletonList(onFailureProcessor),
Collections.singletonList(onFailureOnFailureProcessor))));
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", compoundProcessor));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
doThrow(new RuntimeException()).when(onFailureOnFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
@@ -264,7 +265,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
public void testExecuteSetTTL() throws Exception {
Processor processor = new TestProcessor(ingestDocument -> ingestDocument.setFieldValue("_ttl", "5d"));
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", new CompoundProcessor(processor)));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, new CompoundProcessor(processor)));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
@SuppressWarnings("unchecked")
@@ -280,7 +281,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
public void testExecuteSetInvalidTTL() throws Exception {
Processor processor = new TestProcessor(ingestDocument -> ingestDocument.setFieldValue("_ttl", "abc"));
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", new CompoundProcessor(processor)));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, new CompoundProcessor(processor)));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
@SuppressWarnings("unchecked")
@@ -293,12 +294,14 @@ public class PipelineExecutionServiceTests extends ESTestCase {
}
public void testExecuteProvidedTTL() throws Exception {
- when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", mock(CompoundProcessor.class)));
+ when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, mock(CompoundProcessor.class)));
IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline("_id")
.source(Collections.emptyMap())
.ttl(1000L);
+ @SuppressWarnings("unchecked")
Consumer<Exception> failureHandler = mock(Consumer.class);
+ @SuppressWarnings("unchecked")
Consumer<Boolean> completionHandler = mock(Consumer.class);
executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler);
@@ -334,9 +337,11 @@ public class PipelineExecutionServiceTests extends ESTestCase {
when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class)));
Exception error = new RuntimeException();
doThrow(error).when(processor).execute(any());
- when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, processor));
+ when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, processor));
+ @SuppressWarnings("unchecked")
BiConsumer<IndexRequest, Exception> requestItemErrorHandler = mock(BiConsumer.class);
+ @SuppressWarnings("unchecked")
Consumer<Exception> completionHandler = mock(Consumer.class);
executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler);
@@ -355,7 +360,7 @@ public class PipelineExecutionServiceTests extends ESTestCase {
bulkRequest.add(indexRequest);
}
- when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, new CompoundProcessor()));
+ when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, new CompoundProcessor()));
@SuppressWarnings("unchecked")
BiConsumer<IndexRequest, Exception> requestItemErrorHandler = mock(BiConsumer.class);
@@ -375,15 +380,17 @@ public class PipelineExecutionServiceTests extends ESTestCase {
assertThat(ingestStats.getTotalStats().getIngestFailedCount(), equalTo(0L));
assertThat(ingestStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L));
- when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, new CompoundProcessor(mock(Processor.class))));
- when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, new CompoundProcessor(mock(Processor.class))));
+ when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, version, new CompoundProcessor(mock(Processor.class))));
+ when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, null, new CompoundProcessor(mock(Processor.class))));
Map<String, PipelineConfiguration> configurationMap = new HashMap<>();
configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}")));
configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}")));
executionService.updatePipelineStats(new IngestMetadata(configurationMap));
+ @SuppressWarnings("unchecked")
Consumer<Exception> failureHandler = mock(Consumer.class);
+ @SuppressWarnings("unchecked")
Consumer<Boolean> completionHandler = mock(Consumer.class);
IndexRequest indexRequest = new IndexRequest("_index");
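Throughout PipelineExecutionServiceTests the Pipeline constructor gains a nullable Integer version between the description and the processor, and every mocked generic Consumer/BiConsumer now carries its own @SuppressWarnings("unchecked"). A hedged sketch of the updated stubbing (Pipeline, CompoundProcessor and PipelineStore are the Elasticsearch ingest types):

    import org.elasticsearch.ingest.CompoundProcessor;
    import org.elasticsearch.ingest.Pipeline;
    import org.elasticsearch.ingest.PipelineStore;

    import java.util.function.Consumer;

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    final class PipelineStubbing {
        static void stub(PipelineStore store, Integer version) {
            // version may be a concrete value or null; both are legal pipeline states.
            when(store.get("_id")).thenReturn(
                    new Pipeline("_id", "_description", version, mock(CompoundProcessor.class)));
        }

        @SuppressWarnings("unchecked") // mock(Consumer.class) is inherently raw
        static Consumer<Exception> failureHandler() {
            return mock(Consumer.class);
        }
    }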
diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java
index b09d772729..461873a3fe 100644
--- a/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java
@@ -20,8 +20,6 @@
package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
import java.util.Arrays;
@@ -34,16 +32,19 @@ import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
-import static org.mockito.Mockito.mock;
public class PipelineFactoryTests extends ESTestCase {
+ private final Integer version = randomBoolean() ? randomInt() : null;
+ private final String versionString = version != null ? Integer.toString(version) : null;
+
public void testCreate() throws Exception {
Map<String, Object> processorConfig0 = new HashMap<>();
Map<String, Object> processorConfig1 = new HashMap<>();
processorConfig0.put(ConfigurationUtils.TAG_KEY, "first-processor");
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY,
Arrays.asList(Collections.singletonMap("test", processorConfig0), Collections.singletonMap("test", processorConfig1)));
Pipeline.Factory factory = new Pipeline.Factory();
@@ -51,6 +52,7 @@ public class PipelineFactoryTests extends ESTestCase {
Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
+ assertThat(pipeline.getVersion(), equalTo(version));
assertThat(pipeline.getProcessors().size(), equalTo(2));
assertThat(pipeline.getProcessors().get(0).getType(), equalTo("test-processor"));
assertThat(pipeline.getProcessors().get(0).getTag(), equalTo("first-processor"));
@@ -61,6 +63,7 @@ public class PipelineFactoryTests extends ESTestCase {
public void testCreateWithNoProcessorsField() throws Exception {
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
Pipeline.Factory factory = new Pipeline.Factory();
try {
factory.create("_id", pipelineConfig, Collections.emptyMap());
@@ -73,11 +76,13 @@ public class PipelineFactoryTests extends ESTestCase {
public void testCreateWithEmptyProcessorsField() throws Exception {
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.emptyList());
Pipeline.Factory factory = new Pipeline.Factory();
Pipeline pipeline = factory.create("_id", pipelineConfig, null);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
+ assertThat(pipeline.getVersion(), equalTo(version));
assertThat(pipeline.getProcessors(), is(empty()));
}
@@ -85,6 +90,7 @@ public class PipelineFactoryTests extends ESTestCase {
Map<String, Object> processorConfig = new HashMap<>();
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Pipeline.Factory factory = new Pipeline.Factory();
@@ -92,6 +98,7 @@ public class PipelineFactoryTests extends ESTestCase {
Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
+ assertThat(pipeline.getVersion(), equalTo(version));
assertThat(pipeline.getProcessors().size(), equalTo(1));
assertThat(pipeline.getProcessors().get(0).getType(), equalTo("test-processor"));
assertThat(pipeline.getOnFailureProcessors().size(), equalTo(1));
@@ -102,6 +109,7 @@ public class PipelineFactoryTests extends ESTestCase {
Map<String, Object> processorConfig = new HashMap<>();
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.emptyList());
Pipeline.Factory factory = new Pipeline.Factory();
@@ -115,6 +123,7 @@ public class PipelineFactoryTests extends ESTestCase {
processorConfig.put(Pipeline.ON_FAILURE_KEY, Collections.emptyList());
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Pipeline.Factory factory = new Pipeline.Factory();
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
@@ -130,12 +139,14 @@ public class PipelineFactoryTests extends ESTestCase {
Pipeline.Factory factory = new Pipeline.Factory();
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY,
Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
+ assertThat(pipeline.getVersion(), equalTo(version));
assertThat(pipeline.getProcessors().size(), equalTo(1));
assertThat(pipeline.getOnFailureProcessors().size(), equalTo(0));
@@ -149,6 +160,7 @@ public class PipelineFactoryTests extends ESTestCase {
processorConfig.put("unused", "value");
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Pipeline.Factory factory = new Pipeline.Factory();
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
@@ -162,12 +174,14 @@ public class PipelineFactoryTests extends ESTestCase {
Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
+ pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Pipeline.Factory factory = new Pipeline.Factory();
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
+ assertThat(pipeline.getVersion(), equalTo(version));
assertThat(pipeline.getProcessors().size(), equalTo(1));
assertThat(pipeline.getProcessors().get(0).getType(), equalTo("compound"));
}
@@ -177,7 +191,7 @@ public class PipelineFactoryTests extends ESTestCase {
CompoundProcessor processor1 = new CompoundProcessor(testProcessor, testProcessor);
CompoundProcessor processor2 =
new CompoundProcessor(false, Collections.singletonList(testProcessor), Collections.singletonList(testProcessor));
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor1, processor2));
+ Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor1, processor2));
List<Processor> flattened = pipeline.flattenAllProcessors();
assertThat(flattened.size(), equalTo(4));
}
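PipelineFactoryTests now seeds every config map with Pipeline.VERSION_KEY and asserts the parsed pipeline echoes it back through getVersion(). A hedged sketch of the config shape being parsed; the literal string keys shown here are assumptions standing in for the Pipeline constants:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    final class PipelineConfigShape {
        static Map<String, Object> config(String versionString) {
            Map<String, Object> pipelineConfig = new HashMap<>();
            pipelineConfig.put("description", "_description");         // Pipeline.DESCRIPTION_KEY
            pipelineConfig.put("version", versionString);              // Pipeline.VERSION_KEY, nullable
            pipelineConfig.put("processors", Collections.emptyList()); // Pipeline.PROCESSORS_KEY
            return pipelineConfig;
        }
    }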
diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java
index 29032ae432..cdbe1e1157 100644
--- a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java
@@ -98,7 +98,8 @@ public class PipelineStoreTests extends ESTestCase {
public void testUpdatePipelines() {
ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build();
- store.innerUpdatePipelines(clusterState);
+ ClusterState previousClusterState = clusterState;
+ store.innerUpdatePipelines(previousClusterState, clusterState);
assertThat(store.pipelines.size(), is(0));
PipelineConfiguration pipeline = new PipelineConfiguration(
@@ -108,7 +109,7 @@ public class PipelineStoreTests extends ESTestCase {
clusterState = ClusterState.builder(clusterState)
.metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata))
.build();
- store.innerUpdatePipelines(clusterState);
+ store.innerUpdatePipelines(previousClusterState, clusterState);
assertThat(store.pipelines.size(), is(1));
assertThat(store.pipelines.get("_id").getId(), equalTo("_id"));
assertThat(store.pipelines.get("_id").getDescription(), nullValue());
@@ -124,8 +125,9 @@ public class PipelineStoreTests extends ESTestCase {
// add a new pipeline:
PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"));
+ ClusterState previousClusterState = clusterState;
clusterState = store.innerPut(putRequest, clusterState);
- store.innerUpdatePipelines(clusterState);
+ store.innerUpdatePipelines(previousClusterState, clusterState);
pipeline = store.get(id);
assertThat(pipeline, notNullValue());
assertThat(pipeline.getId(), equalTo(id));
@@ -134,8 +136,9 @@ public class PipelineStoreTests extends ESTestCase {
// overwrite existing pipeline:
putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"));
+ previousClusterState = clusterState;
clusterState = store.innerPut(putRequest, clusterState);
- store.innerUpdatePipelines(clusterState);
+ store.innerUpdatePipelines(previousClusterState, clusterState);
pipeline = store.get(id);
assertThat(pipeline, notNullValue());
assertThat(pipeline.getId(), equalTo(id));
@@ -150,9 +153,10 @@ public class PipelineStoreTests extends ESTestCase {
ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build();
PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"));
+ ClusterState previousClusterState = clusterState;
clusterState = store.innerPut(putRequest, clusterState);
try {
- store.innerUpdatePipelines(clusterState);
+ store.innerUpdatePipelines(previousClusterState, clusterState);
fail("should fail");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("[processors] required property is missing"));
@@ -166,16 +170,18 @@ public class PipelineStoreTests extends ESTestCase {
"_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}")
);
IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", config));
- ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
- .metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata))
- .build();
- store.innerUpdatePipelines(clusterState);
+ ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build();
+ ClusterState previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder()
+ .putCustom(IngestMetadata.TYPE, ingestMetadata)).build();
+ store.innerUpdatePipelines(previousClusterState, clusterState);
assertThat(store.get("_id"), notNullValue());
// Delete pipeline:
DeletePipelineRequest deleteRequest = new DeletePipelineRequest("_id");
+ previousClusterState = clusterState;
clusterState = store.innerDelete(deleteRequest, clusterState);
- store.innerUpdatePipelines(clusterState);
+ store.innerUpdatePipelines(previousClusterState, clusterState);
assertThat(store.get("_id"), nullValue());
// Delete existing pipeline:
@@ -236,8 +242,9 @@ public class PipelineStoreTests extends ESTestCase {
PutPipelineRequest putRequest = new PutPipelineRequest(id,
new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"));
+ ClusterState previousClusterState = clusterState;
clusterState = store.innerPut(putRequest, clusterState);
- store.innerUpdatePipelines(clusterState);
+ store.innerUpdatePipelines(previousClusterState, clusterState);
pipeline = store.get(id);
assertThat(pipeline, notNullValue());
assertThat(pipeline.getId(), equalTo(id));
@@ -246,8 +253,9 @@ public class PipelineStoreTests extends ESTestCase {
assertThat(pipeline.getProcessors().get(0).getType(), equalTo("set"));
DeletePipelineRequest deleteRequest = new DeletePipelineRequest(id);
+ previousClusterState = clusterState;
clusterState = store.innerDelete(deleteRequest, clusterState);
- store.innerUpdatePipelines(clusterState);
+ store.innerUpdatePipelines(previousClusterState, clusterState);
pipeline = store.get(id);
assertThat(pipeline, nullValue());
}
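Every call site in PipelineStoreTests now snapshots the state before mutating it, because innerUpdatePipelines takes both the previous and the current ClusterState (letting the store skip work when the ingest metadata is unchanged between the two). A sketch of the recurring pattern, with a hypothetical StoreLike interface standing in for PipelineStore's package-private methods:

    import org.elasticsearch.cluster.ClusterState;

    final class PutThenSync {
        // Hypothetical stand-in for the package-private PipelineStore methods used here.
        interface StoreLike {
            ClusterState innerPut(Object putRequest, ClusterState state);
            void innerUpdatePipelines(ClusterState previous, ClusterState current);
        }

        static ClusterState putAndSync(StoreLike store, Object putRequest, ClusterState clusterState) {
            ClusterState previousClusterState = clusterState;   // snapshot before the mutation
            clusterState = store.innerPut(putRequest, clusterState);
            store.innerUpdatePipelines(previousClusterState, clusterState);
            return clusterState;
        }
    }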
diff --git a/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java b/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java
index 15fac5ab4c..e022bd7521 100644
--- a/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java
+++ b/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java
@@ -108,7 +108,7 @@ public class SimpleMgetIT extends ESIntegTestCase {
public void testThatSourceFilteringIsSupported() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
BytesReference sourceBytesRef = jsonBuilder().startObject()
- .field("field", "1", "2")
+ .array("field", "1", "2")
.startObject("included").field("field", "should be seen").field("hidden_field", "should not be seen").endObject()
.field("excluded", "should not be seen")
.endObject().bytes();
diff --git a/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java
index 8eba98ae0b..14f7151e40 100644
--- a/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java
+++ b/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java
@@ -141,7 +141,7 @@ public class FsProbeTests extends ESTestCase {
" 253 1 dm-1 112 0 4624 13 0 0 0 0 0 5 13",
" 253 2 dm-2 48045 0 714866 49369 1372291 0 64128568 33730766 0 1058347 33782056"));
- final FsInfo previous = new FsInfo(System.currentTimeMillis(), first, null);
+ final FsInfo previous = new FsInfo(System.currentTimeMillis(), first, new FsInfo.Path[0]);
final FsInfo.IoStats second = probe.ioStats(devicesNumbers, previous);
assertNotNull(second);
assertThat(second.devicesStats[0].majorDeviceNumber, equalTo(253));
diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java
index ab5b1ac475..ca3a1d7070 100644
--- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java
@@ -19,7 +19,7 @@
package org.elasticsearch.monitor.jvm;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
@@ -32,7 +32,7 @@ import static org.mockito.Mockito.when;
public class JvmGcMonitorServiceTests extends ESTestCase {
public void testSlowGcLogging() {
- final ESLogger logger = mock(ESLogger.class);
+ final Logger logger = mock(Logger.class);
when(logger.isWarnEnabled()).thenReturn(true);
when(logger.isInfoEnabled()).thenReturn(true);
when(logger.isDebugEnabled()).thenReturn(true);
@@ -64,7 +64,7 @@ public class JvmGcMonitorServiceTests extends ESTestCase {
when(gc.getCollectionCount()).thenReturn(totalCollectionCount);
when(gc.getCollectionTime()).thenReturn(totalCollectionTime);
- final ByteSizeValue maxHeapUsed = new ByteSizeValue(Math.max(lastHeapUsed.bytes(), currentHeapUsed.bytes()) + 1 << 10);
+ final ByteSizeValue maxHeapUsed = new ByteSizeValue(Math.max(lastHeapUsed.getBytes(), currentHeapUsed.getBytes()) + 1 << 10);
JvmGcMonitorService.JvmMonitor.SlowGcEvent slowGcEvent = new JvmGcMonitorService.JvmMonitor.SlowGcEvent(
gc,
@@ -138,7 +138,7 @@ public class JvmGcMonitorServiceTests extends ESTestCase {
final int current = randomIntBetween(1, Integer.MAX_VALUE);
final long elapsed = randomIntBetween(current, Integer.MAX_VALUE);
final long seq = randomIntBetween(1, Integer.MAX_VALUE);
- final ESLogger logger = mock(ESLogger.class);
+ final Logger logger = mock(Logger.class);
when(logger.isWarnEnabled()).thenReturn(true);
when(logger.isInfoEnabled()).thenReturn(true);
when(logger.isDebugEnabled()).thenReturn(true);
diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java
index 82b264ae1d..131593cd11 100644
--- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmInfoTests.java
@@ -57,5 +57,4 @@ public class JvmInfoTests extends ESTestCase {
final int index = argline.lastIndexOf(flag);
return argline.charAt(index - 1) == '+';
}
-
}
diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java
index d0b1d54171..aa59a914dc 100644
--- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java
+++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java
@@ -45,7 +45,7 @@ public class JvmStatsTests extends ESTestCase {
assertNotNull(mem);
for (ByteSizeValue heap : Arrays.asList(mem.getHeapCommitted(), mem.getHeapMax(), mem.getHeapUsed(), mem.getNonHeapCommitted())) {
assertNotNull(heap);
- assertThat(heap.bytes(), greaterThanOrEqualTo(0L));
+ assertThat(heap.getBytes(), greaterThanOrEqualTo(0L));
}
assertNotNull(mem.getHeapUsedPercent());
assertThat(mem.getHeapUsedPercent(), anyOf(equalTo((short) -1), greaterThanOrEqualTo((short) 0)));
@@ -78,9 +78,9 @@ public class JvmStatsTests extends ESTestCase {
assertTrue(Strings.hasText(bufferPool.getName()));
assertThat(bufferPool.getCount(), greaterThanOrEqualTo(0L));
assertNotNull(bufferPool.getTotalCapacity());
- assertThat(bufferPool.getTotalCapacity().bytes(), greaterThanOrEqualTo(0L));
+ assertThat(bufferPool.getTotalCapacity().getBytes(), greaterThanOrEqualTo(0L));
assertNotNull(bufferPool.getUsed());
- assertThat(bufferPool.getUsed().bytes(), anyOf(equalTo(-1L), greaterThanOrEqualTo(0L)));
+ assertThat(bufferPool.getUsed().getBytes(), anyOf(equalTo(-1L), greaterThanOrEqualTo(0L)));
}
}
diff --git a/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java
index 2edaad5c4b..da9169dcdb 100644
--- a/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java
+++ b/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java
@@ -32,24 +32,28 @@ import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class OsProbeTests extends ESTestCase {
- OsProbe probe = OsProbe.getInstance();
+ private final OsProbe probe = OsProbe.getInstance();
public void testOsInfo() {
- OsInfo info = probe.osInfo();
+ int allocatedProcessors = randomIntBetween(1, Runtime.getRuntime().availableProcessors());
+ long refreshInterval = randomBoolean() ? -1 : randomPositiveLong();
+ OsInfo info = probe.osInfo(refreshInterval, allocatedProcessors);
assertNotNull(info);
- assertThat(info.getRefreshInterval(), anyOf(equalTo(-1L), greaterThanOrEqualTo(0L)));
- assertThat(info.getName(), equalTo(Constants.OS_NAME));
- assertThat(info.getArch(), equalTo(Constants.OS_ARCH));
- assertThat(info.getVersion(), equalTo(Constants.OS_VERSION));
- assertThat(info.getAvailableProcessors(), equalTo(Runtime.getRuntime().availableProcessors()));
+ assertEquals(refreshInterval, info.getRefreshInterval());
+ assertEquals(Constants.OS_NAME, info.getName());
+ assertEquals(Constants.OS_ARCH, info.getArch());
+ assertEquals(Constants.OS_VERSION, info.getVersion());
+ assertEquals(allocatedProcessors, info.getAllocatedProcessors());
+ assertEquals(Runtime.getRuntime().availableProcessors(), info.getAvailableProcessors());
}
public void testOsStats() {
OsStats stats = probe.osStats();
assertNotNull(stats);
assertThat(stats.getTimestamp(), greaterThan(0L));
- assertThat(stats.getCpu().getPercent(), anyOf(equalTo((short) -1), is(both(greaterThanOrEqualTo((short) 0)).and(lessThanOrEqualTo((short) 100)))));
- double[] loadAverage = stats.getCpu().loadAverage;
+ assertThat(stats.getCpu().getPercent(), anyOf(equalTo((short) -1),
+ is(both(greaterThanOrEqualTo((short) 0)).and(lessThanOrEqualTo((short) 100)))));
+ double[] loadAverage = stats.getCpu().getLoadAverage();
if (loadAverage != null) {
assertThat(loadAverage.length, equalTo(3));
}
@@ -84,25 +88,25 @@ public class OsProbeTests extends ESTestCase {
}
assertNotNull(stats.getMem());
- assertThat(stats.getMem().getTotal().bytes(), greaterThan(0L));
- assertThat(stats.getMem().getFree().bytes(), greaterThan(0L));
+ assertThat(stats.getMem().getTotal().getBytes(), greaterThan(0L));
+ assertThat(stats.getMem().getFree().getBytes(), greaterThan(0L));
assertThat(stats.getMem().getFreePercent(), allOf(greaterThanOrEqualTo((short) 0), lessThanOrEqualTo((short) 100)));
- assertThat(stats.getMem().getUsed().bytes(), greaterThan(0L));
+ assertThat(stats.getMem().getUsed().getBytes(), greaterThan(0L));
assertThat(stats.getMem().getUsedPercent(), allOf(greaterThanOrEqualTo((short) 0), lessThanOrEqualTo((short) 100)));
assertNotNull(stats.getSwap());
assertNotNull(stats.getSwap().getTotal());
- long total = stats.getSwap().getTotal().bytes();
+ long total = stats.getSwap().getTotal().getBytes();
if (total > 0) {
- assertThat(stats.getSwap().getTotal().bytes(), greaterThan(0L));
- assertThat(stats.getSwap().getFree().bytes(), greaterThan(0L));
- assertThat(stats.getSwap().getUsed().bytes(), greaterThanOrEqualTo(0L));
+ assertThat(stats.getSwap().getTotal().getBytes(), greaterThan(0L));
+ assertThat(stats.getSwap().getFree().getBytes(), greaterThan(0L));
+ assertThat(stats.getSwap().getUsed().getBytes(), greaterThanOrEqualTo(0L));
} else {
// On platforms with no swap
- assertThat(stats.getSwap().getTotal().bytes(), equalTo(0L));
- assertThat(stats.getSwap().getFree().bytes(), equalTo(0L));
- assertThat(stats.getSwap().getUsed().bytes(), equalTo(0L));
+ assertThat(stats.getSwap().getTotal().getBytes(), equalTo(0L));
+ assertThat(stats.getSwap().getFree().getBytes(), equalTo(0L));
+ assertThat(stats.getSwap().getUsed().getBytes(), equalTo(0L));
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java b/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java
new file mode 100644
index 0000000000..30d527311b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+public class OsStatsTests extends ESTestCase {
+
+ public void testSerialization() throws IOException {
+ int numLoadAverages = randomIntBetween(1, 5);
+ double[] loadAverages = new double[numLoadAverages];
+ for (int i = 0; i < loadAverages.length; i++) {
+ loadAverages[i] = randomDouble();
+ }
+ OsStats.Cpu cpu = new OsStats.Cpu(randomShort(), loadAverages);
+ OsStats.Mem mem = new OsStats.Mem(randomLong(), randomLong());
+ OsStats.Swap swap = new OsStats.Swap(randomLong(), randomLong());
+ OsStats osStats = new OsStats(System.currentTimeMillis(), cpu, mem, swap);
+
+ try (BytesStreamOutput out = new BytesStreamOutput()) {
+ osStats.writeTo(out);
+ try (StreamInput in = out.bytes().streamInput()) {
+ OsStats deserializedOsStats = new OsStats(in);
+ assertEquals(osStats.getTimestamp(), deserializedOsStats.getTimestamp());
+ assertEquals(osStats.getCpu().getPercent(), deserializedOsStats.getCpu().getPercent());
+ assertArrayEquals(osStats.getCpu().getLoadAverage(), deserializedOsStats.getCpu().getLoadAverage(), 0);
+ assertEquals(osStats.getMem().getFree(), deserializedOsStats.getMem().getFree());
+ assertEquals(osStats.getMem().getTotal(), deserializedOsStats.getMem().getTotal());
+ assertEquals(osStats.getSwap().getFree(), deserializedOsStats.getSwap().getFree());
+ assertEquals(osStats.getSwap().getTotal(), deserializedOsStats.getSwap().getTotal());
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java
index 8e6016f6f9..f8fc6a675c 100644
--- a/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java
+++ b/core/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java
@@ -33,14 +33,15 @@ import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class ProcessProbeTests extends ESTestCase {
- ProcessProbe probe = ProcessProbe.getInstance();
+ private final ProcessProbe probe = ProcessProbe.getInstance();
public void testProcessInfo() {
- ProcessInfo info = probe.processInfo();
+ long refreshInterval = randomPositiveLong();
+ ProcessInfo info = probe.processInfo(refreshInterval);
assertNotNull(info);
- assertThat(info.getRefreshInterval(), greaterThanOrEqualTo(0L));
- assertThat(info.getId(), equalTo(jvmInfo().pid()));
- assertThat(info.isMlockall(), equalTo(BootstrapInfo.isMemoryLocked()));
+ assertEquals(refreshInterval, info.getRefreshInterval());
+ assertEquals(jvmInfo().pid(), info.getId());
+ assertEquals(BootstrapInfo.isMemoryLocked(), info.isMlockall());
}
public void testProcessStats() {
@@ -64,11 +65,11 @@ public class ProcessProbeTests extends ESTestCase {
assertThat(cpu.getPercent(), anyOf(lessThan((short) 0), allOf(greaterThanOrEqualTo((short) 0), lessThanOrEqualTo((short) 100))));
// CPU time can return -1 if the platform does not support this operation, let's see which platforms fail
- assertThat(cpu.total, greaterThan(0L));
+ assertThat(cpu.getTotal().millis(), greaterThan(0L));
ProcessStats.Mem mem = stats.getMem();
assertNotNull(mem);
// Committed total virtual memory can return -1 if not supported, let's see which platforms fail
- assertThat(mem.totalVirtual, greaterThan(0L));
+ assertThat(mem.getTotalVirtual().getBytes(), greaterThan(0L));
}
}
diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java
index 7cd4e35521..d9134ba5cf 100644
--- a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java
+++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java
@@ -20,7 +20,6 @@
package org.elasticsearch.nodesinfo;
import org.elasticsearch.Build;
-import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -35,11 +34,11 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.ingest.IngestInfo;
+import org.elasticsearch.ingest.ProcessorInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;
-import org.elasticsearch.monitor.os.DummyOsInfo;
import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.process.ProcessInfo;
-import org.elasticsearch.plugins.DummyPluginInfo;
+import org.elasticsearch.plugins.PluginInfo;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.threadpool.ThreadPool;
@@ -48,7 +47,6 @@ import org.elasticsearch.transport.TransportInfo;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -58,25 +56,20 @@ import static java.util.Collections.emptySet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.core.IsEqual.equalTo;
-/**
- *
- */
public class NodeInfoStreamingTests extends ESTestCase {
public void testNodeInfoStreaming() throws IOException {
NodeInfo nodeInfo = createNodeInfo();
- Version version = Version.CURRENT;
- BytesStreamOutput out = new BytesStreamOutput();
- out.setVersion(version);
- nodeInfo.writeTo(out);
- out.close();
- StreamInput in = out.bytes().streamInput();
- in.setVersion(version);
- NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in);
- assertExpectedUnchanged(nodeInfo, readNodeInfo);
-
+ try (BytesStreamOutput out = new BytesStreamOutput()) {
+ nodeInfo.writeTo(out);
+ try (StreamInput in = out.bytes().streamInput()) {
+ NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in);
+ assertExpectedUnchanged(nodeInfo, readNodeInfo);
+ }
+ }
}
- // checks all properties that are expected to be unchanged. Once we start changing them between versions this method has to be changed as well
+ // checks all properties that are expected to be unchanged.
+ // Once we start changing them between versions this method has to be changed as well
private void assertExpectedUnchanged(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException {
assertThat(nodeInfo.getBuild().toString(), equalTo(readNodeInfo.getBuild().toString()));
assertThat(nodeInfo.getHostname(), equalTo(readNodeInfo.getHostname()));
@@ -89,24 +82,15 @@ public class NodeInfoStreamingTests extends ESTestCase {
compareJsonOutput(nodeInfo.getTransport(), readNodeInfo.getTransport());
compareJsonOutput(nodeInfo.getNode(), readNodeInfo.getNode());
compareJsonOutput(nodeInfo.getOs(), readNodeInfo.getOs());
- comparePluginsAndModules(nodeInfo, readNodeInfo);
+ compareJsonOutput(nodeInfo.getPlugins(), readNodeInfo.getPlugins());
compareJsonOutput(nodeInfo.getIngest(), readNodeInfo.getIngest());
}
- private void comparePluginsAndModules(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException {
- ToXContent.Params params = ToXContent.EMPTY_PARAMS;
- XContentBuilder pluginsAndModules = jsonBuilder();
- pluginsAndModules.startObject();
- nodeInfo.getPlugins().toXContent(pluginsAndModules, params);
- pluginsAndModules.endObject();
- XContentBuilder readPluginsAndModules = jsonBuilder();
- readPluginsAndModules.startObject();
- readNodeInfo.getPlugins().toXContent(readPluginsAndModules, params);
- readPluginsAndModules.endObject();
- assertThat(pluginsAndModules.string(), equalTo(readPluginsAndModules.string()));
- }
-
private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOException {
+ if (param1 == null) {
+ assertNull(param2);
+ return;
+ }
ToXContent.Params params = ToXContent.EMPTY_PARAMS;
XContentBuilder param1Builder = jsonBuilder();
param1Builder.startObject();
@@ -120,36 +104,73 @@ public class NodeInfoStreamingTests extends ESTestCase {
assertThat(param1Builder.string(), equalTo(param2Builder.string()));
}
- private NodeInfo createNodeInfo() {
+ private static NodeInfo createNodeInfo() {
Build build = Build.CURRENT;
DiscoveryNode node = new DiscoveryNode("test_node", LocalTransportAddress.buildUnique(),
emptyMap(), emptySet(), VersionUtils.randomVersion(random()));
- Map<String, String> serviceAttributes = new HashMap<>();
- serviceAttributes.put("test", "attribute");
- Settings settings = Settings.builder().put("test", "setting").build();
- OsInfo osInfo = DummyOsInfo.INSTANCE;
- ProcessInfo process = new ProcessInfo(randomInt(), randomBoolean());
- JvmInfo jvm = JvmInfo.jvmInfo();
- List<ThreadPool.Info> threadPoolInfos = new ArrayList<>();
- threadPoolInfos.add(new ThreadPool.Info("test_threadpool", ThreadPool.ThreadPoolType.FIXED, 5));
- ThreadPoolInfo threadPoolInfo = new ThreadPoolInfo(threadPoolInfos);
+ Settings settings = randomBoolean() ? null : Settings.builder().put("test", "setting").build();
+ OsInfo osInfo = null;
+ if (randomBoolean()) {
+ int availableProcessors = randomIntBetween(1, 64);
+ int allocatedProcessors = randomIntBetween(1, availableProcessors);
+ long refreshInterval = randomBoolean() ? -1 : randomPositiveLong();
+ String name = randomAsciiOfLengthBetween(3, 10);
+ String arch = randomAsciiOfLengthBetween(3, 10);
+ String version = randomAsciiOfLengthBetween(3, 10);
+ osInfo = new OsInfo(refreshInterval, availableProcessors, allocatedProcessors, name, arch, version);
+ }
+ ProcessInfo process = randomBoolean() ? null : new ProcessInfo(randomInt(), randomBoolean(), randomPositiveLong());
+ JvmInfo jvm = randomBoolean() ? null : JvmInfo.jvmInfo();
+ ThreadPoolInfo threadPoolInfo = null;
+ if (randomBoolean()) {
+ int numThreadPools = randomIntBetween(1, 10);
+ List<ThreadPool.Info> threadPoolInfos = new ArrayList<>(numThreadPools);
+ for (int i = 0; i < numThreadPools; i++) {
+ threadPoolInfos.add(new ThreadPool.Info(randomAsciiOfLengthBetween(3, 10),
+ randomFrom(ThreadPool.ThreadPoolType.values()), randomInt()));
+ }
+ threadPoolInfo = new ThreadPoolInfo(threadPoolInfos);
+ }
Map<String, BoundTransportAddress> profileAddresses = new HashMap<>();
- BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[]{LocalTransportAddress.buildUnique()}, LocalTransportAddress.buildUnique());
+ BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(
+ new TransportAddress[]{LocalTransportAddress.buildUnique()}, LocalTransportAddress.buildUnique());
profileAddresses.put("test_address", dummyBoundTransportAddress);
- TransportInfo transport = new TransportInfo(dummyBoundTransportAddress, profileAddresses);
- HttpInfo htttpInfo = new HttpInfo(dummyBoundTransportAddress, randomLong());
- PluginsAndModules plugins = new PluginsAndModules();
- plugins.addModule(DummyPluginInfo.INSTANCE);
- plugins.addPlugin(DummyPluginInfo.INSTANCE);
- IngestInfo ingestInfo = new IngestInfo(Collections.emptyList());
- ByteSizeValue indexingBuffer;
- if (random().nextBoolean()) {
- indexingBuffer = null;
- } else {
+ TransportInfo transport = randomBoolean() ? null : new TransportInfo(dummyBoundTransportAddress, profileAddresses);
+ HttpInfo httpInfo = randomBoolean() ? null : new HttpInfo(dummyBoundTransportAddress, randomLong());
+
+ PluginsAndModules pluginsAndModules = null;
+ if (randomBoolean()) {
+ int numPlugins = randomIntBetween(0, 5);
+ List<PluginInfo> plugins = new ArrayList<>();
+ for (int i = 0; i < numPlugins; i++) {
+ plugins.add(new PluginInfo(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10),
+ randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10)));
+ }
+ int numModules = randomIntBetween(0, 5);
+ List<PluginInfo> modules = new ArrayList<>();
+ for (int i = 0; i < numModules; i++) {
+ modules.add(new PluginInfo(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10),
+ randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10)));
+ }
+ pluginsAndModules = new PluginsAndModules(plugins, modules);
+ }
+
+ IngestInfo ingestInfo = null;
+ if (randomBoolean()) {
+ int numProcessors = randomIntBetween(0, 5);
+ List<ProcessorInfo> processors = new ArrayList<>(numProcessors);
+ for (int i = 0; i < numProcessors; i++) {
+ processors.add(new ProcessorInfo(randomAsciiOfLengthBetween(3, 10)));
+ }
+ ingestInfo = new IngestInfo(processors);
+ }
+
+ ByteSizeValue indexingBuffer = null;
+ if (randomBoolean()) {
// pick a random long that sometimes exceeds an int:
indexingBuffer = new ByteSizeValue(random().nextLong() & ((1L<<40)-1));
}
return new NodeInfo(VersionUtils.randomVersion(random()), build, node, settings, osInfo, process, jvm,
- threadPoolInfo, transport, htttpInfo, plugins, ingestInfo, indexingBuffer);
+ threadPoolInfo, transport, httpInfo, pluginsAndModules, ingestInfo, indexingBuffer);
}
}
diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java
index 0916cad60d..8a4c50f8a9 100644
--- a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java
+++ b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java
@@ -97,22 +97,22 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
assertThat(response.getNodes().size(), is(2));
assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer());
- assertThat(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer().bytes(), greaterThan(0L));
+ assertThat(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer());
- assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().bytes(), greaterThan(0L));
+ assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
// again, using only the indices flag
response = client().admin().cluster().prepareNodesInfo().clear().setIndices(true).execute().actionGet();
assertThat(response.getNodes().size(), is(2));
assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer());
- assertThat(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer().bytes(), greaterThan(0L));
+ assertThat(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer());
- assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().bytes(), greaterThan(0L));
+ assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
}
public void testAllocatedProcessors() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java
index 73b31b9263..4ad52be886 100644
--- a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java
@@ -23,8 +23,9 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
import org.elasticsearch.test.ESTestCase;
-import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
@@ -201,15 +202,17 @@ public class PluginInfoTests extends ESTestCase {
}
public void testPluginListSorted() {
- PluginsAndModules pluginsInfo = new PluginsAndModules();
- pluginsInfo.addPlugin(new PluginInfo("c", "foo", "dummy", "dummyclass"));
- pluginsInfo.addPlugin(new PluginInfo("b", "foo", "dummy", "dummyclass"));
- pluginsInfo.addPlugin(new PluginInfo("e", "foo", "dummy", "dummyclass"));
- pluginsInfo.addPlugin(new PluginInfo("a", "foo", "dummy", "dummyclass"));
- pluginsInfo.addPlugin(new PluginInfo("d", "foo", "dummy", "dummyclass"));
+ List<PluginInfo> plugins = new ArrayList<>();
+ plugins.add(new PluginInfo("c", "foo", "dummy", "dummyclass"));
+ plugins.add(new PluginInfo("b", "foo", "dummy", "dummyclass"));
+ plugins.add(new PluginInfo("e", "foo", "dummy", "dummyclass"));
+ plugins.add(new PluginInfo("a", "foo", "dummy", "dummyclass"));
+ plugins.add(new PluginInfo("d", "foo", "dummy", "dummyclass"));
+ PluginsAndModules pluginsInfo = new PluginsAndModules(plugins, Collections.emptyList());
+
final List<PluginInfo> infos = pluginsInfo.getPluginInfos();
- List<String> names = infos.stream().map((input) -> input.getName()).collect(Collectors.toList());
+ List<String> names = infos.stream().map(PluginInfo::getName).collect(Collectors.toList());
assertThat(names, contains("a", "b", "c", "d", "e"));
}
}
diff --git a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java
index 26872ca11b..75904e69c2 100644
--- a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java
+++ b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java
@@ -24,6 +24,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.MapBuilder;
@@ -77,7 +78,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
internalCluster().startNodesAsync(2, settings).get();
// make sure the cluster state is green, and all has been recovered
- assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
logger.info("--> add two more nodes");
internalCluster().startNodesAsync(2, settings).get();
@@ -86,7 +87,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
setMinimumMasterNodes(3);
// make sure the cluster state is green, and all has been recovered
- assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("5"));
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5"));
logger.info("--> refreshing and checking data");
refresh();
@@ -97,14 +98,14 @@ public class FullRollingRestartIT extends ESIntegTestCase {
// now start shutting nodes down
internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered
- assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("4"));
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4"));
// going down to 3 nodes. note that the min_master_node may not be in effect when we shut down the 4th
// node, but that's OK as it is set to 3 before.
setMinimumMasterNodes(2);
internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered
- assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
logger.info("--> stopped two nodes, verifying data");
refresh();
@@ -115,14 +116,14 @@ public class FullRollingRestartIT extends ESIntegTestCase {
// closing the 3rd node
internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered
- assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("2"));
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2"));
// closing the 2nd node
setMinimumMasterNodes(1);
internalCluster().stopRandomDataNode();
// make sure the cluster state is yellow, and all has been recovered
- assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForNodes("1"));
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("1"));
logger.info("--> one node left, verifying data");
refresh();
@@ -151,7 +152,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
ClusterState state = client().admin().cluster().prepareState().get().getState();
RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
- assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getType() != RecoveryState.Type.PRIMARY_RELOCATION);
+ assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false);
}
internalCluster().restartRandomDataNode();
ensureGreen();
@@ -159,7 +160,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
- assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getType() != RecoveryState.Type.PRIMARY_RELOCATION);
+ assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false);
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java
index 582be02d45..5b1073beff 100644
--- a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java
+++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java
@@ -27,7 +27,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveriesCollection;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThan;
public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase {
- static final RecoveryTargetService.RecoveryListener listener = new RecoveryTargetService.RecoveryListener() {
+ static final PeerRecoveryTargetService.RecoveryListener listener = new PeerRecoveryTargetService.RecoveryListener() {
@Override
public void onRecoveryDone(RecoveryState state) {
@@ -72,7 +72,7 @@ public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase {
final AtomicBoolean failed = new AtomicBoolean();
final CountDownLatch latch = new CountDownLatch(1);
final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica(),
- new RecoveryTargetService.RecoveryListener() {
+ new PeerRecoveryTargetService.RecoveryListener() {
@Override
public void onRecoveryDone(RecoveryState state) {
latch.countDown();
@@ -154,10 +154,9 @@ public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase {
}
long startRecovery(RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard indexShard,
- RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) {
+ PeerRecoveryTargetService.RecoveryListener listener, TimeValue timeValue) {
final DiscoveryNode rNode = getDiscoveryNode(indexShard.routingEntry().currentNodeId());
- indexShard.markAsRecovering("remote", new RecoveryState(indexShard.shardId(), false, RecoveryState.Type.REPLICA, sourceNode,
- rNode));
+ indexShard.markAsRecovering("remote", new RecoveryState(indexShard.routingEntry(), sourceNode, rNode));
indexShard.prepareForIndexRecovery();
return collection.startRecovery(indexShard, sourceNode, listener, timeValue);
}
diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
index 63b572f4d5..d0dd969d0c 100644
--- a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
+++ b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
@@ -19,16 +19,16 @@
package org.elasticsearch.recovery;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@@ -42,6 +42,7 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
import java.util.Arrays;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
@@ -52,9 +53,9 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllS
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
-@TestLogging("_root:DEBUG,index.shard:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE")
public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
- private final ESLogger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class);
+ private final Logger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class);
public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception {
logger.info("--> creating test index ...");
@@ -105,7 +106,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
logger.info("--> refreshing the index");
refreshAndAssert();
logger.info("--> verifying indexed content");
- iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10);
+ iterateAssertCount(numberOfShards, 10, indexer.getIds());
}
}
@@ -156,7 +157,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
logger.info("--> refreshing the index");
refreshAndAssert();
logger.info("--> verifying indexed content");
- iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10);
+ iterateAssertCount(numberOfShards, 10, indexer.getIds());
}
}
@@ -193,7 +194,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
allowNodes("test", 4);
logger.info("--> waiting for GREEN health status ...");
- assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus().setWaitForRelocatingShards(0));
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus().setWaitForNoRelocatingShards(true));
logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
waitForDocs(totalNumDocs, indexer);
@@ -204,28 +205,28 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
logger.info("--> allow 3 nodes for index [test] ...");
allowNodes("test", 3);
logger.info("--> waiting for relocations ...");
- assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
logger.info("--> allow 2 nodes for index [test] ...");
allowNodes("test", 2);
logger.info("--> waiting for relocations ...");
- assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
logger.info("--> allow 1 nodes for index [test] ...");
allowNodes("test", 1);
logger.info("--> waiting for relocations ...");
- assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
logger.info("--> marking and waiting for indexing threads to stop ...");
indexer.stop();
logger.info("--> indexing threads stopped");
- assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
logger.info("--> refreshing the index");
refreshAndAssert();
logger.info("--> verifying indexed content");
- iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10);
+ iterateAssertCount(numberOfShards, 10, indexer.getIds());
}
}
@@ -263,11 +264,12 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
logger.info("--> refreshing the index");
refreshAndAssert();
logger.info("--> verifying indexed content");
- iterateAssertCount(numShards, indexer.totalIndexedDocs(), 10);
+ iterateAssertCount(numShards, 10, indexer.getIds());
}
}
- private void iterateAssertCount(final int numberOfShards, final long numberOfDocs, final int iterations) throws Exception {
+ private void iterateAssertCount(final int numberOfShards, final int iterations, final Set<String> ids) throws Exception {
+ final long numberOfDocs = ids.size();
SearchResponse[] iterationResults = new SearchResponse[iterations];
boolean error = false;
for (int i = 0; i < iterations; i++) {
@@ -290,12 +292,11 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
ClusterService clusterService = clusterService();
final ClusterState state = clusterService.state();
for (int shard = 0; shard < numberOfShards; shard++) {
- // background indexer starts using ids on 1
- for (int id = 1; id <= numberOfDocs; id++) {
- ShardId docShard = clusterService.operationRouting().shardId(state, "test", Long.toString(id), null);
+ for (String id : ids) {
+ ShardId docShard = clusterService.operationRouting().shardId(state, "test", id, null);
if (docShard.id() == shard) {
for (ShardRouting shardRouting : state.routingTable().shardRoutingTable("test", shard)) {
- GetResponse response = client().prepareGet("test", "type", Long.toString(id))
+ GetResponse response = client().prepareGet("test", "type", id)
.setPreference("_only_nodes:" + shardRouting.currentNodeId()).get();
if (response.isExists()) {
logger.info("missing id [{}] on shard {}", id, shardRouting);
@@ -321,6 +322,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
TimeUnit.MINUTES
)
);
+ assertEquals(numberOfDocs, ids.size());
}
// let's now make the test fail if it was supposed to fail
diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java
index fe8bad5642..b661761e52 100644
--- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java
+++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java
@@ -22,7 +22,6 @@ package org.elasticsearch.recovery;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.procedures.IntProcedure;
import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.English;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
@@ -46,7 +45,7 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
@@ -91,7 +90,7 @@ import static org.hamcrest.Matchers.startsWith;
/**
*/
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
-@TestLogging("_root:DEBUG,indices.recovery:TRACE,index.shard.service:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.index.shard.service:TRACE")
public class RelocationIT extends ESIntegTestCase {
private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES);
@@ -137,9 +136,9 @@ public class RelocationIT extends ESIntegTestCase {
.add(new MoveAllocationCommand("test", 0, node_1, node_2))
.execute().actionGet();
- clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
- clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
logger.info("--> verifying count again...");
@@ -200,9 +199,9 @@ public class RelocationIT extends ESIntegTestCase {
logger.debug("--> flushing");
client().admin().indices().prepareFlush().get();
}
- ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
- clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
indexer.pauseIndexing();
logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
@@ -219,7 +218,7 @@ public class RelocationIT extends ESIntegTestCase {
for (int i = 0; i < 10; i++) {
try {
logger.info("--> START search test round {}", i + 1);
- SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).setNoStoredFields().execute().actionGet().getHits();
+ SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).storedFields().execute().actionGet().getHits();
ranOnce = true;
if (hits.totalHits() != indexer.totalIndexedDocs()) {
int[] hitIds = new int[(int) indexer.totalIndexedDocs()];
@@ -233,13 +232,8 @@ public class RelocationIT extends ESIntegTestCase {
logger.error("Extra id [{}]", id);
}
}
- set.forEach(new IntProcedure() {
-
- @Override
- public void apply(int value) {
- logger.error("Missing id [{}]", value);
- }
-
+ set.forEach((IntProcedure) value -> {
+ logger.error("Missing id [{}]", value);
});
}
assertThat(hits.totalHits(), equalTo(indexer.totalIndexedDocs()));
@@ -333,7 +327,7 @@ public class RelocationIT extends ESIntegTestCase {
indexRandom(true, true, builders2);
// verify cluster was finished.
- assertFalse(client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).setWaitForEvents(Priority.LANGUID).setTimeout("30s").get().isTimedOut());
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).setTimeout("30s").get().isTimedOut());
logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
logger.debug("--> verifying all searches return the same number of docs");
@@ -364,7 +358,7 @@ public class RelocationIT extends ESIntegTestCase {
List<IndexRequestBuilder> requests = new ArrayList<>();
int numDocs = scaledRandomIntBetween(25, 250);
for (int i = 0; i < numDocs; i++) {
- requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}"));
+ requests.add(client().prepareIndex(indexName, "type").setSource("{}"));
}
indexRandom(true, requests);
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut());
@@ -499,7 +493,7 @@ public class RelocationIT extends ESIntegTestCase {
@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
- if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
+ if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request;
if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) {
// corrupting the segments_N files in order to make sure future recovery re-send files
diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
index 1503c0a9dd..339d7d6d52 100644
--- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
@@ -33,7 +33,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.recovery.IndexRecoveryIT;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
import org.elasticsearch.indices.recovery.RecoverySettings;
-import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.transport.MockTransportService;
@@ -111,7 +111,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
}
ensureGreen();
// ensure we have flushed segments and merge them into a single big one via force merge
- client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).get();
+ client().admin().indices().prepareFlush().setForce(true).get();
client().admin().indices().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get();
final CountDownLatch latch = new CountDownLatch(1);
@@ -122,7 +122,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
- if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
+ if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
diff --git a/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
index 83e4c0e86c..07e6aa0f16 100644
--- a/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
+++ b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
@@ -23,15 +23,7 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResp
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.snapshots.SnapshotId;
@@ -75,7 +67,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
String id = Integer.toString(i);
client().prepareIndex(indexName, "type1", id).setSource("text", "sometext").get();
}
- client().admin().indices().prepareFlush(indexName).setWaitIfOngoing(true).get();
+ client().admin().indices().prepareFlush(indexName).get();
logger.info("--> create first snapshot");
CreateSnapshotResponse createSnapshotResponse = client.admin()
@@ -178,25 +170,4 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
return repository;
}
- private void writeOldFormat(final BlobStoreRepository repository, final List<String> snapshotNames) throws Exception {
- final BytesReference bRef;
- try (BytesStreamOutput bStream = new BytesStreamOutput()) {
- try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
- XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
- builder.startObject();
- builder.startArray("snapshots");
- for (final String snapshotName : snapshotNames) {
- builder.value(snapshotName);
- }
- builder.endArray();
- builder.endObject();
- builder.close();
- }
- bRef = bStream.bytes();
- }
- try (StreamInput stream = bRef.streamInput()) {
- repository.blobContainer().writeBlob(BlobStoreRepository.SNAPSHOTS_FILE, stream, bRef.length()); // write to index file
- }
- }
-
}
diff --git a/core/src/test/java/org/elasticsearch/rest/DeprecationRestHandlerTests.java b/core/src/test/java/org/elasticsearch/rest/DeprecationRestHandlerTests.java
index be0f3b1511..7f46139a26 100644
--- a/core/src/test/java/org/elasticsearch/rest/DeprecationRestHandlerTests.java
+++ b/core/src/test/java/org/elasticsearch/rest/DeprecationRestHandlerTests.java
@@ -78,8 +78,15 @@ public class DeprecationRestHandlerTests extends ESTestCase {
ASCIIHeaderGenerator generator = new ASCIIHeaderGenerator();
String value = generator.ofCodeUnitsLength(random(), 1, 50);
- assertTrue(DeprecationRestHandler.validHeaderValue(value));
- assertSame(value, DeprecationRestHandler.requireValidHeader(value));
+ if (value.trim().length() == 0) {
+ // empty text, not a valid header
+ assertFalse(DeprecationRestHandler.validHeaderValue(value));
+ Exception e = expectThrows(IllegalArgumentException.class, () -> DeprecationRestHandler.requireValidHeader(value));
+ assertEquals("header value must contain only US ASCII text", e.getMessage());
+ } else {
+ assertTrue(DeprecationRestHandler.validHeaderValue(value));
+ assertSame(value, DeprecationRestHandler.requireValidHeader(value));
+ }
}
public void testInvalidHeaderValue() {
diff --git a/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java
index 0c8a1c9a3c..bef1ed44ac 100644
--- a/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java
+++ b/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java
@@ -30,8 +30,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.rest.RestRequest.Method;
-import org.elasticsearch.rest.action.RestMainAction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
@@ -45,12 +43,13 @@ public class RestMainActionTests extends ESTestCase {
public void testHeadResponse() throws Exception {
final String nodeName = "node1";
final ClusterName clusterName = new ClusterName("cluster1");
+ final String clusterUUID = randomAsciiOfLengthBetween(10, 20);
final boolean available = randomBoolean();
final RestStatus expectedStatus = available ? RestStatus.OK : RestStatus.SERVICE_UNAVAILABLE;
final Version version = Version.CURRENT;
final Build build = Build.CURRENT;
- final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, build, available);
+ final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, clusterUUID, build, available);
XContentBuilder builder = JsonXContent.contentBuilder();
RestRequest restRequest = new FakeRestRequest() {
@Override
@@ -70,13 +69,14 @@ public class RestMainActionTests extends ESTestCase {
public void testGetResponse() throws Exception {
final String nodeName = "node1";
final ClusterName clusterName = new ClusterName("cluster1");
+ final String clusterUUID = randomAsciiOfLengthBetween(10, 20);
final boolean available = randomBoolean();
final RestStatus expectedStatus = available ? RestStatus.OK : RestStatus.SERVICE_UNAVAILABLE;
final Version version = Version.CURRENT;
final Build build = Build.CURRENT;
final boolean prettyPrint = randomBoolean();
- final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, build, available);
+ final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, clusterUUID, build, available);
XContentBuilder builder = JsonXContent.contentBuilder();
Map<String, String> params = new HashMap<>();
diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java
index de188ba9e9..9b7d4073d0 100644
--- a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java
@@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.startsWith;
@@ -90,14 +89,10 @@ public class RestAnalyzeActionTests extends ESTestCase {
public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception {
AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
-
- try {
- RestAnalyzeAction.buildFromContent(new BytesArray("{invalid_json}"), analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
- fail("shouldn't get here");
- } catch (Exception e) {
- assertThat(e, instanceOf(IllegalArgumentException.class));
- assertThat(e.getMessage(), equalTo("Failed to parse request body"));
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> RestAnalyzeAction.buildFromContent(
+ new BytesArray("{invalid_json}"), analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)));
+ assertThat(e.getMessage(), equalTo("Failed to parse request body"));
}
public void testParseXContentForAnalyzeRequestWithUnknownParamThrowsException() throws Exception {
@@ -107,14 +102,9 @@ public class RestAnalyzeActionTests extends ESTestCase {
.field("text", "THIS IS A TEST")
.field("unknown", "keyword")
.endObject().bytes();
-
- try {
- RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
- fail("shouldn't get here");
- } catch (Exception e) {
- assertThat(e, instanceOf(IllegalArgumentException.class));
- assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)));
+ assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
}
public void testParseXContentForAnalyzeRequestWithInvalidStringExplainParamThrowsException() throws Exception {
@@ -123,64 +113,57 @@ public class RestAnalyzeActionTests extends ESTestCase {
.startObject()
.field("explain", "fals")
.endObject().bytes();
- try {
- RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
- fail("shouldn't get here");
- } catch (Exception e) {
- assertThat(e, instanceOf(IllegalArgumentException.class));
- assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'"));
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)));
+ assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'"));
}
public void testDeprecatedParamException() throws Exception {
- BytesReference content = XContentFactory.jsonBuilder()
- .startObject()
- .field("text", "THIS IS A TEST")
- .field("tokenizer", "keyword")
- .array("filters", "lowercase")
- .endObject().bytes();
-
- AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
-
- try {
- RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
- } catch (Exception e) {
- assertThat(e, instanceOf(IllegalArgumentException.class));
- assertThat(e.getMessage(), startsWith("Unknown parameter [filters]"));
- }
-
- content = XContentFactory.jsonBuilder()
- .startObject()
- .field("text", "THIS IS A TEST")
- .field("tokenizer", "keyword")
- .array("token_filters", "lowercase")
- .endObject().bytes();
-
- analyzeRequest = new AnalyzeRequest("for test");
-
- try {
- RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
- } catch (Exception e) {
- assertThat(e, instanceOf(IllegalArgumentException.class));
- assertThat(e.getMessage(), startsWith("Unknown parameter [token_filters]"));
- }
-
- content = XContentFactory.jsonBuilder()
- .startObject()
- .field("text", "THIS IS A TEST")
- .field("tokenizer", "keyword")
- .array("char_filters", "lowercase")
- .endObject().bytes();
-
- analyzeRequest = new AnalyzeRequest("for test");
-
- try {
- RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY));
- } catch (Exception e) {
- assertThat(e, instanceOf(IllegalArgumentException.class));
- assertThat(e.getMessage(), startsWith("Unknown parameter [char_filters]"));
- }
-
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> RestAnalyzeAction.buildFromContent(
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .field("text", "THIS IS A TEST")
+ .field("tokenizer", "keyword")
+ .array("filters", "lowercase")
+ .endObject().bytes(),
+ new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY)));
+ assertThat(e.getMessage(), startsWith("Unknown parameter [filters]"));
+
+ e = expectThrows(IllegalArgumentException.class,
+ () -> RestAnalyzeAction.buildFromContent(
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .field("text", "THIS IS A TEST")
+ .field("tokenizer", "keyword")
+ .array("token_filters", "lowercase")
+ .endObject().bytes(),
+ new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY)));
+ assertThat(e.getMessage(), startsWith("Unknown parameter [token_filters]"));
+
+ e = expectThrows(IllegalArgumentException.class,
+ () -> RestAnalyzeAction.buildFromContent(
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .field("text", "THIS IS A TEST")
+ .field("tokenizer", "keyword")
+ .array("char_filters", "lowercase")
+ .endObject().bytes(),
+ new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY)));
+ assertThat(e.getMessage(), startsWith("Unknown parameter [char_filters]"));
+
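+        // the singular "token_filter" spelling is rejected the same way as the parameters above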
+ e = expectThrows(IllegalArgumentException.class,
+ () -> RestAnalyzeAction.buildFromContent(
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .field("text", "THIS IS A TEST")
+ .field("tokenizer", "keyword")
+ .array("token_filter", "lowercase")
+                        .endObject().bytes(),
+                new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY)));
+ assertThat(e.getMessage(), startsWith("Unknown parameter [token_filter]"));
}
}
diff --git a/core/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java
index 2d6d65d634..abaefcf438 100644
--- a/core/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java
+++ b/core/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java
@@ -30,6 +30,8 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Table;
@@ -55,6 +57,7 @@ import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.search.suggest.completion.CompletionStats;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.rest.FakeRestRequest;
import java.nio.file.Path;
import java.util.ArrayList;
@@ -105,7 +108,7 @@ public class RestIndicesActionTests extends ESTestCase {
clusterState.getClusterName().value(), indicesStr, clusterState, 0, 0, 0, TimeValue.timeValueMillis(1000L)
);
- final Table table = action.buildTable(null, indices, clusterHealth, randomIndicesStatsResponse(indices), metaData);
+ final Table table = action.buildTable(new FakeRestRequest(), indices, clusterHealth, randomIndicesStatsResponse(indices), metaData);
// now, verify the table is correct
int count = 0;
@@ -134,8 +137,10 @@ public class RestIndicesActionTests extends ESTestCase {
for (int i = 0; i < 2; i++) {
ShardId shardId = new ShardId(index, i);
Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(i));
- ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, null, i == 0,
- new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
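+            // the primary (i == 0) recovers from an empty store; the replica recovers from a peer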
+ ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, i == 0,
+ i == 0 ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE,
+ new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)
+ );
shardRouting = shardRouting.initialize("node-0", null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
shardRouting = shardRouting.moveToStarted();
CommonStats stats = new CommonStats();
diff --git a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
index c0ca4eb227..9495ba7e99 100644
--- a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
+++ b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
@@ -21,10 +21,10 @@ package org.elasticsearch.rest.action.cat;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
-import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
+import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Table;
import org.elasticsearch.common.settings.Settings;
@@ -33,7 +33,6 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.rest.RestController;
-import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -66,7 +65,7 @@ public class RestRecoveryActionTests extends ESTestCase {
final RecoveryState.Timer timer = mock(RecoveryState.Timer.class);
when(timer.time()).thenReturn((long)randomIntBetween(1000000, 10 * 1000000));
when(state.getTimer()).thenReturn(timer);
- when(state.getType()).thenReturn(randomFrom(RecoveryState.Type.values()));
+ when(state.getRecoverySource()).thenReturn(TestShardRouting.randomRecoverySource());
when(state.getStage()).thenReturn(randomFrom(RecoveryState.Stage.values()));
final DiscoveryNode sourceNode = randomBoolean() ? mock(DiscoveryNode.class) : null;
if (sourceNode != null) {
@@ -77,13 +76,6 @@ public class RestRecoveryActionTests extends ESTestCase {
when(targetNode.getHostName()).thenReturn(randomAsciiOfLength(8));
when(state.getTargetNode()).thenReturn(targetNode);
- final RestoreSource restoreSource = randomBoolean() ? mock(RestoreSource.class) : null;
- if (restoreSource != null) {
- final Snapshot snapshot = new Snapshot(randomAsciiOfLength(8),
- new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
- when(restoreSource.snapshot()).thenReturn(snapshot);
- }
-
RecoveryState.Index index = mock(RecoveryState.Index.class);
final int totalRecoveredFiles = randomIntBetween(1, 64);
@@ -160,7 +152,7 @@ public class RestRecoveryActionTests extends ESTestCase {
assertThat(cells.get(0).value, equalTo("index"));
assertThat(cells.get(1).value, equalTo(i));
assertThat(cells.get(2).value, equalTo(new TimeValue(state.getTimer().time())));
- assertThat(cells.get(3).value, equalTo(state.getType().name().toLowerCase(Locale.ROOT)));
+ assertThat(cells.get(3).value, equalTo(state.getRecoverySource().getType().name().toLowerCase(Locale.ROOT)));
assertThat(cells.get(4).value, equalTo(state.getStage().name().toLowerCase(Locale.ROOT)));
assertThat(cells.get(5).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName()));
assertThat(cells.get(6).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName()));
@@ -168,10 +160,14 @@ public class RestRecoveryActionTests extends ESTestCase {
assertThat(cells.get(8).value, equalTo(state.getTargetNode().getName()));
assertThat(
cells.get(9).value,
- equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getRepository()));
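+                // only snapshot-based recoveries expose a repository and snapshot name; other recovery types render "n/a"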
+ equalTo(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ?
+ "n/a" :
+ ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getRepository()));
assertThat(
cells.get(10).value,
- equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getSnapshotId().getName()));
+ equalTo(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ?
+ "n/a" :
+ ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getSnapshotId().getName()));
assertThat(cells.get(11).value, equalTo(state.getIndex().totalRecoverFiles()));
assertThat(cells.get(12).value, equalTo(state.getIndex().recoveredFileCount()));
assertThat(cells.get(13).value, equalTo(percent(state.getIndex().recoveredFilesPercent())));
diff --git a/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java
index cb880fc4fe..c60e9e06d1 100644
--- a/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java
+++ b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.routing;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -30,7 +31,6 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
-import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
@@ -62,14 +62,15 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
createIndex("test2");
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias110").searchRouting("1,0")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias12").routing("2")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases()
+ .addAliasAction(AliasActions.add().index("test1").alias("alias"))
+ .addAliasAction(AliasActions.add().index("test1").alias("alias10").routing("0"))
+ .addAliasAction(AliasActions.add().index("test1").alias("alias110").searchRouting("1,0"))
+ .addAliasAction(AliasActions.add().index("test1").alias("alias12").routing("2"))
+ .addAliasAction(AliasActions.add().index("test2").alias("alias20").routing("0"))
+ .addAliasAction(AliasActions.add().index("test2").alias("alias21").routing("1"))
+ .addAliasAction(AliasActions.add().index("test1").alias("alias0").routing("0"))
+ .addAliasAction(AliasActions.add().index("test2").alias("alias0").routing("0")).get();
assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "test1"), nullValue());
assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "alias"), nullValue());
@@ -103,12 +104,13 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
createIndex("test2");
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
- client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases()
+ .addAliasAction(AliasActions.add().index("test1").alias("alias"))
+ .addAliasAction(AliasActions.add().index("test1").alias("alias10").routing("0"))
+ .addAliasAction(AliasActions.add().index("test2").alias("alias20").routing("0"))
+ .addAliasAction(AliasActions.add().index("test2").alias("alias21").routing("1"))
+ .addAliasAction(AliasActions.add().index("test1").alias("alias0").routing("0"))
+ .addAliasAction(AliasActions.add().index("test2").alias("alias0").routing("0")).get();
ClusterState state = clusterService().state();
IndexNameExpressionResolver indexNameExpressionResolver = internalCluster().getInstance(IndexNameExpressionResolver.class);
diff --git a/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java
index 6a4d965706..621ade9e31 100644
--- a/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java
+++ b/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.routing;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
@@ -26,12 +27,11 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.test.ESIntegTestCase;
-import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
/**
- *
+ * Test aliases with routing.
*/
public class AliasRoutingIT extends ESIntegTestCase {
@@ -43,7 +43,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
public void testAliasCrudRouting() throws Exception {
createIndex("test");
ensureGreen();
- assertAcked(admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0")));
+ assertAcked(admin().indices().prepareAliases().addAliasAction(AliasActions.add().index("test").alias("alias0").routing("0")));
logger.info("--> indexing with id [1], and routing [0] using alias");
client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
@@ -105,10 +105,10 @@ public class AliasRoutingIT extends ESIntegTestCase {
createIndex("test");
ensureGreen();
assertAcked(admin().indices().prepareAliases()
- .addAliasAction(newAddAliasAction("test", "alias"))
- .addAliasAction(newAddAliasAction("test", "alias0").routing("0"))
- .addAliasAction(newAddAliasAction("test", "alias1").routing("1"))
- .addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1")));
+ .addAliasAction(AliasActions.add().index("test").alias("alias"))
+ .addAliasAction(AliasActions.add().index("test").alias("alias0").routing("0"))
+ .addAliasAction(AliasActions.add().index("test").alias("alias1").routing("1"))
+ .addAliasAction(AliasActions.add().index("test").alias("alias01").searchRouting("0,1")));
logger.info("--> indexing with id [1], and routing [0] using alias");
client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
@@ -200,12 +200,12 @@ public class AliasRoutingIT extends ESIntegTestCase {
createIndex("test-b");
ensureGreen();
assertAcked(admin().indices().prepareAliases()
- .addAliasAction(newAddAliasAction("test-a", "alias-a0").routing("0"))
- .addAliasAction(newAddAliasAction("test-a", "alias-a1").routing("1"))
- .addAliasAction(newAddAliasAction("test-b", "alias-b0").routing("0"))
- .addAliasAction(newAddAliasAction("test-b", "alias-b1").routing("1"))
- .addAliasAction(newAddAliasAction("test-a", "alias-ab").searchRouting("0"))
- .addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1")));
+ .addAliasAction(AliasActions.add().index("test-a").alias("alias-a0").routing("0"))
+ .addAliasAction(AliasActions.add().index("test-a").alias("alias-a1").routing("1"))
+ .addAliasAction(AliasActions.add().index("test-b").alias("alias-b0").routing("0"))
+ .addAliasAction(AliasActions.add().index("test-b").alias("alias-b1").routing("1"))
+ .addAliasAction(AliasActions.add().index("test-a").alias("alias-ab").searchRouting("0"))
+ .addAliasAction(AliasActions.add().index("test-b").alias("alias-ab").searchRouting("1")));
ensureGreen(); // wait for events again to make sure we got the aliases on all nodes
logger.info("--> indexing with id [1], and routing [0] using alias to test-a");
client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
@@ -259,7 +259,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
createIndex("index", "index_2");
ensureGreen();
assertAcked(admin().indices().prepareAliases()
- .addAliasAction(newAddAliasAction("index", "index_1").routing("1")));
+ .addAliasAction(AliasActions.add().index("index").alias("index_1").routing("1")));
logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
@@ -284,7 +284,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
createIndex("index", "index_2");
ensureGreen();
assertAcked(admin().indices().prepareAliases()
- .addAliasAction(newAddAliasAction("index", "index_1").routing("1")));
+ .addAliasAction(AliasActions.add().index("index").alias("index_1").routing("1")));
logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
@@ -305,7 +305,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
ensureGreen();
logger.info("--> creating alias with routing [3]");
assertAcked(admin().indices().prepareAliases()
- .addAliasAction(newAddAliasAction("test", "alias").routing("3")));
+ .addAliasAction(AliasActions.add().index("test").alias("alias").routing("3")));
logger.info("--> indexing with id [0], and routing [3]");
client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
@@ -320,7 +320,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
logger.info("--> creating alias with routing [4]");
assertAcked(admin().indices().prepareAliases()
- .addAliasAction(newAddAliasAction("test", "alias").routing("4")));
+ .addAliasAction(AliasActions.add().index("test").alias("alias").routing("4")));
logger.info("--> verifying search with wrong routing should not find");
for (int i = 0; i < 5; i++) {
@@ -330,7 +330,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
logger.info("--> creating alias with search routing [3,4] and index routing 4");
assertAcked(client().admin().indices().prepareAliases()
- .addAliasAction(newAddAliasAction("test", "alias").searchRouting("3,4").indexRouting("4")));
+ .addAliasAction(AliasActions.add().index("test").alias("alias").searchRouting("3,4").indexRouting("4")));
logger.info("--> indexing with id [1], and routing [4]");
client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java b/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java
index c934de2dd7..bc3757386f 100644
--- a/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java
+++ b/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java
@@ -98,15 +98,15 @@ public class ScriptMetaDataTests extends ESTestCase {
public void testDiff() throws Exception {
ScriptMetaData.Builder builder = new ScriptMetaData.Builder(null);
- builder.storeScript("lang", "1", new BytesArray("abc"));
- builder.storeScript("lang", "2", new BytesArray("def"));
- builder.storeScript("lang", "3", new BytesArray("ghi"));
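+        // stored script sources are now parsed as xcontent, so each test source is wrapped in a small JSON object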
+ builder.storeScript("lang", "1", new BytesArray("{\"foo\":\"abc\"}"));
+ builder.storeScript("lang", "2", new BytesArray("{\"foo\":\"def\"}"));
+ builder.storeScript("lang", "3", new BytesArray("{\"foo\":\"ghi\"}"));
ScriptMetaData scriptMetaData1 = builder.build();
builder = new ScriptMetaData.Builder(scriptMetaData1);
- builder.storeScript("lang", "2", new BytesArray("changed"));
+ builder.storeScript("lang", "2", new BytesArray("{\"foo\":\"changed\"}"));
builder.deleteScript("lang", "3");
- builder.storeScript("lang", "4", new BytesArray("jkl"));
+ builder.storeScript("lang", "4", new BytesArray("{\"foo\":\"jkl\"}"));
ScriptMetaData scriptMetaData2 = builder.build();
ScriptMetaData.ScriptMetadataDiff diff = (ScriptMetaData.ScriptMetadataDiff) scriptMetaData2.diff(scriptMetaData1);
@@ -118,19 +118,19 @@ public class ScriptMetaDataTests extends ESTestCase {
assertNotNull(((DiffableUtils.MapDiff) diff.pipelines).getUpserts().get("lang#4"));
ScriptMetaData result = (ScriptMetaData) diff.apply(scriptMetaData1);
- assertEquals(new BytesArray("abc"), result.getScriptAsBytes("lang", "1"));
- assertEquals(new BytesArray("changed"), result.getScriptAsBytes("lang", "2"));
- assertEquals(new BytesArray("jkl"), result.getScriptAsBytes("lang", "4"));
+ assertEquals(new BytesArray("{\"foo\":\"abc\"}"), result.getScriptAsBytes("lang", "1"));
+ assertEquals(new BytesArray("{\"foo\":\"changed\"}"), result.getScriptAsBytes("lang", "2"));
+ assertEquals(new BytesArray("{\"foo\":\"jkl\"}"), result.getScriptAsBytes("lang", "4"));
}
public void testBuilder() {
ScriptMetaData.Builder builder = new ScriptMetaData.Builder(null);
builder.storeScript("_lang", "_id", new BytesArray("{\"script\":\"1 + 1\"}"));
- IllegalArgumentException e =
- expectThrows(IllegalArgumentException.class, () -> builder.storeScript("_lang#", "_id", new BytesArray("{}")));
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> builder.storeScript("_lang#", "_id", new BytesArray("{\"foo\": \"bar\"}")));
assertEquals("stored script language can't contain: '#'", e.getMessage());
- e = expectThrows(IllegalArgumentException.class, () -> builder.storeScript("_lang", "_id#", new BytesArray("{}")));
+ e = expectThrows(IllegalArgumentException.class, () -> builder.storeScript("_lang", "_id#", new BytesArray("{\"foo\": \"bar\"}")));
assertEquals("stored script id can't contain: '#'", e.getMessage());
e = expectThrows(IllegalArgumentException.class, () -> builder.deleteScript("_lang#", "_id"));
assertEquals("stored script language can't contain: '#'", e.getMessage());
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
index 3e07d3c170..bc7cb9ffb6 100644
--- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
@@ -45,6 +45,7 @@ import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Map;
import static org.hamcrest.CoreMatchers.containsString;
@@ -86,7 +87,9 @@ public class ScriptServiceTests extends ESTestCase {
resourceWatcherService = new ResourceWatcherService(baseSettings, null);
scriptEngineService = new TestEngineService();
dangerousScriptEngineService = new TestDangerousEngineService();
- scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(Collections.singleton(scriptEngineService));
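+        // register an extra engine under the default script language so compiling without an explicit lang resolves to it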
+ TestEngineService defaultScriptServiceEngine = new TestEngineService(Script.DEFAULT_SCRIPT_LANG) {};
+ scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(
+ new HashSet<>(Arrays.asList(scriptEngineService, defaultScriptServiceEngine)));
//randomly register custom script contexts
int randomInt = randomIntBetween(0, 3);
//prevent duplicates using map
@@ -103,7 +106,8 @@ public class ScriptServiceTests extends ESTestCase {
String context = plugin + "_" + operation;
contexts.put(context, new ScriptContext.Plugin(plugin, operation));
}
- scriptEngineRegistry = new ScriptEngineRegistry(Arrays.asList(scriptEngineService, dangerousScriptEngineService));
+ scriptEngineRegistry = new ScriptEngineRegistry(Arrays.asList(scriptEngineService, dangerousScriptEngineService,
+ defaultScriptServiceEngine));
scriptContextRegistry = new ScriptContextRegistry(contexts.values());
scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
scriptContexts = scriptContextRegistry.scriptContexts().toArray(new ScriptContext[scriptContextRegistry.scriptContexts().size()]);
@@ -406,12 +410,11 @@ public class ScriptServiceTests extends ESTestCase {
public void testDefaultLanguage() throws IOException {
Settings.Builder builder = Settings.builder();
- builder.put("script.default_lang", "test");
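+        // no default language is configured anymore; compilation should fall back to Script.DEFAULT_SCRIPT_LANG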
builder.put("script.inline", "true");
buildScriptService(builder.build());
CompiledScript script = scriptService.compile(new Script("1 + 1", ScriptType.INLINE, null, null),
randomFrom(scriptContexts), Collections.emptyMap());
- assertEquals(script.lang(), "test");
+ assertEquals(script.lang(), Script.DEFAULT_SCRIPT_LANG);
}
public void testStoreScript() throws Exception {
@@ -426,14 +429,14 @@ public class ScriptServiceTests extends ESTestCase {
ScriptMetaData scriptMetaData = result.getMetaData().custom(ScriptMetaData.TYPE);
assertNotNull(scriptMetaData);
assertEquals("abc", scriptMetaData.getScript("_lang", "_id"));
- assertEquals(script, scriptMetaData.getScriptAsBytes("_lang", "_id"));
}
public void testDeleteScript() throws Exception {
ClusterState cs = ClusterState.builder(new ClusterName("_name"))
.metaData(MetaData.builder()
.putCustom(ScriptMetaData.TYPE,
- new ScriptMetaData.Builder(null).storeScript("_lang", "_id", new BytesArray("abc")).build()))
+ new ScriptMetaData.Builder(null).storeScript("_lang", "_id",
+ new BytesArray("{\"script\":\"abc\"}")).build()))
.build();
DeleteStoredScriptRequest request = new DeleteStoredScriptRequest("_lang", "_id");
@@ -509,14 +512,24 @@ public class ScriptServiceTests extends ESTestCase {
public static final String NAME = "test";
+ private final String name;
+
+ public TestEngineService() {
+ this(NAME);
+ }
+
+ public TestEngineService(String name) {
+ this.name = name;
+ }
+
@Override
public String getType() {
- return NAME;
+ return name;
}
@Override
public String getExtension() {
- return NAME;
+ return name;
}
@Override
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java b/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java
index 053ccec652..917650d36b 100644
--- a/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/script/ScriptSettingsTests.java
@@ -20,11 +20,13 @@
package org.elasticsearch.script;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.test.ESTestCase;
import java.util.Collections;
+import java.util.Iterator;
import java.util.Map;
import static org.hamcrest.Matchers.containsString;
@@ -32,38 +34,55 @@ import static org.hamcrest.Matchers.equalTo;
public class ScriptSettingsTests extends ESTestCase {
- public void testDefaultLanguageIsGroovy() {
+ public void testDefaultLegacyLanguageIsPainless() {
ScriptEngineRegistry scriptEngineRegistry =
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
- assertThat(scriptSettings.getDefaultScriptLanguageSetting().get(Settings.EMPTY), equalTo("groovy"));
+ assertThat(scriptSettings.getDefaultLegacyScriptLanguageSetting().get(Settings.EMPTY),
+ equalTo(ScriptSettings.LEGACY_DEFAULT_LANG));
}
- public void testCustomDefaultLanguage() {
+ public void testCustomLegacyDefaultLanguage() {
ScriptEngineRegistry scriptEngineRegistry =
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
String defaultLanguage = CustomScriptEngineService.NAME;
- Settings settings = Settings.builder().put("script.default_lang", defaultLanguage).build();
- assertThat(scriptSettings.getDefaultScriptLanguageSetting().get(settings), equalTo(defaultLanguage));
+ Settings settings = Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, defaultLanguage).build();
+ assertThat(scriptSettings.getDefaultLegacyScriptLanguageSetting().get(settings), equalTo(defaultLanguage));
}
- public void testInvalidDefaultLanguage() {
+ public void testInvalidLegacyDefaultLanguage() {
ScriptEngineRegistry scriptEngineRegistry =
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
- Settings settings = Settings.builder().put("script.default_lang", "C++").build();
+ Settings settings = Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, "C++").build();
try {
- scriptSettings.getDefaultScriptLanguageSetting().get(settings);
+ scriptSettings.getDefaultLegacyScriptLanguageSetting().get(settings);
fail("should have seen unregistered default language");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("unregistered default language [C++]"));
}
}
+    public void testSettingsAreProperlyPropagated() {
+ ScriptEngineRegistry scriptEngineRegistry =
+ new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
+ ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
+ ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
+ boolean enabled = randomBoolean();
+ Settings s = Settings.builder().put("script.inline", enabled).build();
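+        // the global script.inline value should propagate to every generated per-language *.inline setting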
+ for (Iterator<Setting<Boolean>> iter = scriptSettings.getScriptLanguageSettings().iterator(); iter.hasNext();) {
+ Setting<Boolean> setting = iter.next();
+ if (setting.getKey().endsWith(".inline")) {
+ assertThat("inline settings should have propagated", setting.get(s), equalTo(enabled));
+ assertThat(setting.getDefaultRaw(s), equalTo(Boolean.toString(enabled)));
+ }
+ }
+ }
+
private static class CustomScriptEngineService implements ScriptEngineService {
public static final String NAME = "custom";
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptTests.java b/core/src/test/java/org/elasticsearch/script/ScriptTests.java
new file mode 100644
index 0000000000..316a1c8451
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/ScriptTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ScriptTests extends ESTestCase {
+
+ public void testScriptParsing() throws IOException {
+ XContent xContent = randomFrom(XContentType.JSON, XContentType.YAML).xContent();
+ Script expectedScript = createScript(xContent);
+ try (XContentBuilder builder = XContentBuilder.builder(xContent)) {
+ expectedScript.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ try (XContentParser parser = XContentHelper.createParser(builder.bytes())) {
+ Script actualScript = Script.parse(parser, ParseFieldMatcher.STRICT);
+ assertThat(actualScript, equalTo(expectedScript));
+ }
+ }
+ }
+
+ public void testScriptSerialization() throws IOException {
+ XContent xContent = randomFrom(XContentType.JSON, XContentType.YAML).xContent();
+ Script expectedScript = createScript(xContent);
+ try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
+ expectedScript.writeTo(new OutputStreamStreamOutput(out));
+ try (ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray())) {
+ Script actualScript = new Script(new InputStreamStreamInput(in));
+ assertThat(actualScript, equalTo(expectedScript));
+ }
+ }
+ }
+
+ private Script createScript(XContent xContent) throws IOException {
+ final Map<String, Object> params = randomBoolean() ? null : Collections.singletonMap("key", "value");
+ ScriptService.ScriptType scriptType = randomFrom(ScriptService.ScriptType.values());
+ String script;
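+        // inline scripts carry an xcontent body; other script types are referenced by a short random name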
+ if (scriptType == ScriptService.ScriptType.INLINE) {
+ try (XContentBuilder builder = XContentBuilder.builder(xContent)) {
+ builder.startObject();
+ builder.field("field", randomAsciiOfLengthBetween(1, 5));
+ builder.endObject();
+ script = builder.string();
+ }
+ } else {
+ script = randomAsciiOfLengthBetween(1, 5);
+ }
+ return new Script(
+ script,
+ scriptType,
+ randomFrom("_lang1", "_lang2", null),
+ params,
+ scriptType == ScriptService.ScriptType.INLINE ? xContent.type() : null
+ );
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java
new file mode 100644
index 0000000000..048416c25e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java
@@ -0,0 +1,512 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndicesModule;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests;
+import org.elasticsearch.search.rescore.QueryRescoreBuilderTests;
+import org.elasticsearch.search.searchafter.SearchAfterBuilder;
+import org.elasticsearch.search.slice.SliceBuilder;
+import org.elasticsearch.search.sort.ScriptSortBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.search.suggest.SuggestBuilderTests;
+import org.elasticsearch.test.AbstractQueryTestCase;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Function;
+
+public abstract class AbstractSearchTestCase extends ESTestCase {
+
+ protected NamedWriteableRegistry namedWriteableRegistry;
+ protected SearchRequestParsers searchRequestParsers;
+ private TestSearchExtPlugin searchExtPlugin;
+
+    @Override
+    public void setUp() throws Exception {
+ super.setUp();
+ IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
+ searchExtPlugin = new TestSearchExtPlugin();
+ SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.singletonList(searchExtPlugin));
+ List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
+ entries.addAll(indicesModule.getNamedWriteables());
+ entries.addAll(searchModule.getNamedWriteables());
+ namedWriteableRegistry = new NamedWriteableRegistry(entries);
+ searchRequestParsers = searchModule.getSearchRequestParsers();
+ }
+
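+    // Builds a SearchSourceBuilder with each of its components randomly set, for round-trip serialization tests.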
+ protected SearchSourceBuilder createSearchSourceBuilder() throws IOException {
+ SearchSourceBuilder builder = new SearchSourceBuilder();
+ if (randomBoolean()) {
+ builder.from(randomIntBetween(0, 10000));
+ }
+ if (randomBoolean()) {
+ builder.size(randomIntBetween(0, 10000));
+ }
+ if (randomBoolean()) {
+ builder.explain(randomBoolean());
+ }
+ if (randomBoolean()) {
+ builder.version(randomBoolean());
+ }
+ if (randomBoolean()) {
+ builder.trackScores(randomBoolean());
+ }
+ if (randomBoolean()) {
+ builder.minScore(randomFloat() * 1000);
+ }
+ if (randomBoolean()) {
+ builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout"));
+ }
+ if (randomBoolean()) {
+ builder.terminateAfter(randomIntBetween(1, 100000));
+ }
+
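+        // exercise the three storedFields variants: the no-arg form, the special "_none_" marker, and an explicit field list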
+        switch (randomInt(2)) {
+ case 0:
+ builder.storedFields();
+ break;
+ case 1:
+ builder.storedField("_none_");
+ break;
+ case 2:
+ int fieldsSize = randomInt(25);
+ List<String> fields = new ArrayList<>(fieldsSize);
+ for (int i = 0; i < fieldsSize; i++) {
+ fields.add(randomAsciiOfLengthBetween(5, 50));
+ }
+ builder.storedFields(fields);
+ break;
+ default:
+ throw new IllegalStateException();
+ }
+
+ if (randomBoolean()) {
+ int scriptFieldsSize = randomInt(25);
+ for (int i = 0; i < scriptFieldsSize; i++) {
+ if (randomBoolean()) {
+ builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean());
+ } else {
+ builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"));
+ }
+ }
+ }
+ if (randomBoolean()) {
+ FetchSourceContext fetchSourceContext;
+ int branch = randomInt(5);
+ String[] includes = new String[randomIntBetween(0, 20)];
+ for (int i = 0; i < includes.length; i++) {
+ includes[i] = randomAsciiOfLengthBetween(5, 20);
+ }
+ String[] excludes = new String[randomIntBetween(0, 20)];
+ for (int i = 0; i < excludes.length; i++) {
+ excludes[i] = randomAsciiOfLengthBetween(5, 20);
+ }
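+            // pick one of the six FetchSourceContext constructor variants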
+ switch (branch) {
+ case 0:
+ fetchSourceContext = new FetchSourceContext(randomBoolean());
+ break;
+ case 1:
+ fetchSourceContext = new FetchSourceContext(includes, excludes);
+ break;
+ case 2:
+ fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20));
+ break;
+ case 3:
+ fetchSourceContext = new FetchSourceContext(true, includes, excludes);
+ break;
+ case 4:
+ fetchSourceContext = new FetchSourceContext(includes);
+ break;
+ case 5:
+ fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20));
+ break;
+ default:
+ throw new IllegalStateException();
+ }
+ builder.fetchSource(fetchSourceContext);
+ }
+ if (randomBoolean()) {
+ int size = randomIntBetween(0, 20);
+ List<String> statsGroups = new ArrayList<>(size);
+ for (int i = 0; i < size; i++) {
+ statsGroups.add(randomAsciiOfLengthBetween(5, 20));
+ }
+ builder.stats(statsGroups);
+ }
+ if (randomBoolean()) {
+ int indexBoostSize = randomIntBetween(1, 10);
+ for (int i = 0; i < indexBoostSize; i++) {
+ builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10);
+ }
+ }
+ if (randomBoolean()) {
+ builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
+ }
+ if (randomBoolean()) {
+ builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
+ }
+ if (randomBoolean()) {
+ int numSorts = randomIntBetween(1, 5);
+ for (int i = 0; i < numSorts; i++) {
+ int branch = randomInt(5);
+ switch (branch) {
+ case 0:
+ builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
+ break;
+ case 1:
+ builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20),
+ AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values())));
+ break;
+ case 2:
+ builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values())));
+ break;
+ case 3:
+ builder.sort(SortBuilders.scriptSort(new Script("foo"),
+ ScriptSortBuilder.ScriptSortType.NUMBER).order(randomFrom(SortOrder.values())));
+ break;
+ case 4:
+ builder.sort(randomAsciiOfLengthBetween(5, 20));
+ break;
+ case 5:
+ builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values()));
+ break;
+ }
+ }
+ }
+
+ if (randomBoolean()) {
+ int numSearchFrom = randomIntBetween(1, 5);
+            // We build a JSON version of the search_from values first so that every number type stays
+            // the same before/after xcontent (de)serialization. Any type change would not be a problem at
+            // runtime, because the final type of each value is extracted from the associated sort field,
+            // but this little trick ensures that equals and hashCode match across the xcontent round-trip.
+ XContentBuilder jsonBuilder = XContentFactory.jsonBuilder();
+ jsonBuilder.startObject();
+ jsonBuilder.startArray("search_from");
+ for (int i = 0; i < numSearchFrom; i++) {
+ int branch = randomInt(8);
+ switch (branch) {
+ case 0:
+ jsonBuilder.value(randomInt());
+ break;
+ case 1:
+ jsonBuilder.value(randomFloat());
+ break;
+ case 2:
+ jsonBuilder.value(randomLong());
+ break;
+ case 3:
+ jsonBuilder.value(randomDouble());
+ break;
+ case 4:
+ jsonBuilder.value(randomAsciiOfLengthBetween(5, 20));
+ break;
+ case 5:
+ jsonBuilder.value(randomBoolean());
+ break;
+ case 6:
+ jsonBuilder.value(randomByte());
+ break;
+ case 7:
+ jsonBuilder.value(randomShort());
+ break;
+ case 8:
+ jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20)));
+ break;
+ }
+ }
+ jsonBuilder.endArray();
+ jsonBuilder.endObject();
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes());
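+            // step past START_OBJECT and the "search_from" field name so the parser sits on the values array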
+ parser.nextToken();
+ parser.nextToken();
+ parser.nextToken();
+ builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues());
+ }
+ if (randomBoolean()) {
+ builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder());
+ }
+ if (randomBoolean()) {
+ builder.suggest(SuggestBuilderTests.randomSuggestBuilder());
+ }
+ if (randomBoolean()) {
+ int numRescores = randomIntBetween(1, 5);
+ for (int i = 0; i < numRescores; i++) {
+ builder.addRescorer(QueryRescoreBuilderTests.randomRescoreBuilder());
+ }
+ }
+ if (randomBoolean()) {
+ builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20)));
+ }
+ if (randomBoolean()) {
+ Set<String> elementNames = new HashSet<>(searchExtPlugin.getSupportedElements().keySet());
+ int numSearchExts = randomIntBetween(1, elementNames.size());
+            while (elementNames.size() > numSearchExts) {
+ elementNames.remove(randomFrom(elementNames));
+ }
+ List<SearchExtBuilder> searchExtBuilders = new ArrayList<>();
+ for (String elementName : elementNames) {
+ searchExtBuilders.add(searchExtPlugin.getSupportedElements().get(elementName).apply(randomAsciiOfLengthBetween(3, 10)));
+ }
+ builder.ext(searchExtBuilders);
+ }
+ if (randomBoolean()) {
+ String field = randomBoolean() ? null : randomAsciiOfLengthBetween(5, 20);
+ int max = between(2, 1000);
+ int id = randomInt(max-1);
+ if (field == null) {
+ builder.slice(new SliceBuilder(id, max));
+ } else {
+ builder.slice(new SliceBuilder(field, id, max));
+ }
+ }
+ return builder;
+ }
+
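+    // Builds a SearchRequest whose top-level parameters (indices, routing, scroll, source, ...) are randomly set.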
+ protected SearchRequest createSearchRequest() throws IOException {
+ SearchRequest searchRequest = new SearchRequest();
+ if (randomBoolean()) {
+ searchRequest.indices(generateRandomStringArray(10, 10, false, false));
+ }
+ if (randomBoolean()) {
+ searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
+ }
+ if (randomBoolean()) {
+ searchRequest.types(generateRandomStringArray(10, 10, false, false));
+ }
+ if (randomBoolean()) {
+ searchRequest.preference(randomAsciiOfLengthBetween(3, 10));
+ }
+ if (randomBoolean()) {
+ searchRequest.requestCache(randomBoolean());
+ }
+ if (randomBoolean()) {
+ searchRequest.routing(randomAsciiOfLengthBetween(3, 10));
+ }
+ if (randomBoolean()) {
+ searchRequest.scroll(randomPositiveTimeValue());
+ }
+ if (randomBoolean()) {
+ searchRequest.searchType(randomFrom(SearchType.values()));
+ }
+ if (randomBoolean()) {
+ searchRequest.source(createSearchSourceBuilder());
+ }
+ return searchRequest;
+ }
+
+ private static class TestSearchExtPlugin extends Plugin implements SearchPlugin {
+ private final List<SearchExtSpec<? extends SearchExtBuilder>> searchExtSpecs;
+ private final Map<String, Function<String, ? extends SearchExtBuilder>> supportedElements;
+
+ private TestSearchExtPlugin() {
+ int numSearchExts = randomIntBetween(1, 3);
+ this.searchExtSpecs = new ArrayList<>(numSearchExts);
+ this.supportedElements = new HashMap<>();
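+            // pick between one and three distinct ext elements; the map put deduplicates repeated picks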
+ for (int i = 0; i < numSearchExts; i++) {
+ switch (randomIntBetween(0, 2)) {
+ case 0:
+ if (this.supportedElements.put(TestSearchExtBuilder1.NAME, TestSearchExtBuilder1::new) == null) {
+ this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder1.NAME, TestSearchExtBuilder1::new,
+ new TestSearchExtParser<>(TestSearchExtBuilder1::new)));
+ }
+ break;
+ case 1:
+ if (this.supportedElements.put(TestSearchExtBuilder2.NAME, TestSearchExtBuilder2::new) == null) {
+ this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder2.NAME, TestSearchExtBuilder2::new,
+ new TestSearchExtParser<>(TestSearchExtBuilder2::new)));
+ }
+ break;
+ case 2:
+ if (this.supportedElements.put(TestSearchExtBuilder3.NAME, TestSearchExtBuilder3::new) == null) {
+ this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder3.NAME, TestSearchExtBuilder3::new,
+ new TestSearchExtParser<>(TestSearchExtBuilder3::new)));
+ }
+ break;
+ default:
+ throw new UnsupportedOperationException();
+ }
+ }
+ }
+
+ Map<String, Function<String, ? extends SearchExtBuilder>> getSupportedElements() {
+ return supportedElements;
+ }
+
+ @Override
+ public List<SearchExtSpec<?>> getSearchExts() {
+ return searchExtSpecs;
+ }
+ }
+
+ private static class TestSearchExtParser<T extends SearchExtBuilder> implements SearchExtParser<T> {
+ private final Function<String, T> searchExtBuilderFunction;
+
+ TestSearchExtParser(Function<String, T> searchExtBuilderFunction) {
+ this.searchExtBuilderFunction = searchExtBuilderFunction;
+ }
+
+ @Override
+ public T fromXContent(XContentParser parser) throws IOException {
+ return searchExtBuilderFunction.apply(parseField(parser));
+ }
+
+ String parseField(XContentParser parser) throws IOException {
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new ParsingException(parser.getTokenLocation(), "start_object expected, found " + parser.currentToken());
+ }
+ if (parser.nextToken() != XContentParser.Token.FIELD_NAME) {
+ throw new ParsingException(parser.getTokenLocation(), "field_name expected, found " + parser.currentToken());
+ }
+ String field = parser.currentName();
+ if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
+ throw new ParsingException(parser.getTokenLocation(), "start_object expected, found " + parser.currentToken());
+ }
+ if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ throw new ParsingException(parser.getTokenLocation(), "end_object expected, found " + parser.currentToken());
+ }
+ if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ throw new ParsingException(parser.getTokenLocation(), "end_object expected, found " + parser.currentToken());
+ }
+ return field;
+ }
+ }
+
+ // It would be nice to have a single builder that takes its name as a parameter, but the name would not be set when the
+ // object is created by reading from the stream (the constructor that takes a StreamInput). That is a problem because,
+ // after reading a named writeable, we check that its name is the expected one. Hence the following, less dynamic, approach.
+ private static class TestSearchExtBuilder1 extends TestSearchExtBuilder {
+ private static final String NAME = "name1";
+
+ TestSearchExtBuilder1(String field) {
+ super(NAME, field);
+ }
+
+ TestSearchExtBuilder1(StreamInput in) throws IOException {
+ super(NAME, in);
+ }
+ }
+
+ private static class TestSearchExtBuilder2 extends TestSearchExtBuilder {
+ private static final String NAME = "name2";
+
+ TestSearchExtBuilder2(String field) {
+ super(NAME, field);
+ }
+
+ TestSearchExtBuilder2(StreamInput in) throws IOException {
+ super(NAME, in);
+ }
+ }
+
+ private static class TestSearchExtBuilder3 extends TestSearchExtBuilder {
+ private static final String NAME = "name3";
+
+ TestSearchExtBuilder3(String field) {
+ super(NAME, field);
+ }
+
+ TestSearchExtBuilder3(StreamInput in) throws IOException {
+ super(NAME, in);
+ }
+ }
+
+ private abstract static class TestSearchExtBuilder extends SearchExtBuilder {
+ final String objectName;
+ protected final String name;
+
+ TestSearchExtBuilder(String name, String objectName) {
+ this.name = name;
+ this.objectName = objectName;
+ }
+
+ TestSearchExtBuilder(String name, StreamInput in) throws IOException {
+ this.name = name;
+ this.objectName = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(objectName);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ TestSearchExtBuilder that = (TestSearchExtBuilder) o;
+ return Objects.equals(objectName, that.objectName) &&
+ Objects.equals(name, that.name);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectName, name);
+ }
+
+ @Override
+ public String getWriteableName() {
+ return name;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.startObject(objectName);
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
+ }
+}
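The three near-identical TestSearchExtBuilder subclasses above exist because of the name check performed when a named writeable is read back: the reader resolves the name in the registry and then verifies that the deserialized object reports the same name through getWriteableName(). A minimal sketch of that round trip, assuming a NamedWriteableRegistry populated from the plugin's SearchExtSpecs and the 5.x stream API (the helper name roundTrip is hypothetical):

    private static SearchExtBuilder roundTrip(SearchExtBuilder original, NamedWriteableRegistry registry) throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            // writeNamedWriteable writes the writeable's name ahead of its payload
            output.writeNamedWriteable(original);
            try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), registry)) {
                // readNamedWriteable resolves the reader by name and checks getWriteableName() on the result
                return in.readNamedWriteable(SearchExtBuilder.class);
            }
        }
    }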
diff --git a/core/src/test/java/org/elasticsearch/search/internal/DefaultSearchContextTests.java b/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
index 9ab5af9f39..e546130b2e 100644
--- a/core/src/test/java/org/elasticsearch/search/internal/DefaultSearchContextTests.java
+++ b/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
@@ -17,7 +17,7 @@
* under the License.
*/
-package org.elasticsearch.search.internal;
+package org.elasticsearch.search;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanQuery;
@@ -26,6 +26,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.TypeFieldMapper;
+import org.elasticsearch.search.DefaultSearchContext;
import org.elasticsearch.test.ESTestCase;
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
diff --git a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java
index 3db3492f41..6f48dbe491 100644
--- a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java
@@ -24,51 +24,13 @@ import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.indices.IndicesModule;
-import org.elasticsearch.test.ESTestCase;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Arrays;
-import java.util.List;
-import static java.util.Collections.emptyList;
-import static org.elasticsearch.search.builder.SearchSourceBuilderTests.createSearchSourceBuilder;
-
-public class SearchRequestTests extends ESTestCase {
-
- private static NamedWriteableRegistry namedWriteableRegistry;
-
- @BeforeClass
- public static void beforeClass() {
- IndicesModule indicesModule = new IndicesModule(emptyList()) {
- @Override
- protected void configure() {
- bindMapperExtension();
- }
- };
- SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()) {
- @Override
- protected void configureSearch() {
- // Skip me
- }
- };
- List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
- entries.addAll(indicesModule.getNamedWriteables());
- entries.addAll(searchModule.getNamedWriteables());
- namedWriteableRegistry = new NamedWriteableRegistry(entries);
- }
-
- @AfterClass
- public static void afterClass() {
- namedWriteableRegistry = null;
- }
+public class SearchRequestTests extends AbstractSearchTestCase {
public void testSerialization() throws Exception {
SearchRequest searchRequest = createSearchRequest();
@@ -206,38 +168,6 @@ public class SearchRequestTests extends ESTestCase {
}
}
- public static SearchRequest createSearchRequest() throws IOException {
- SearchRequest searchRequest = new SearchRequest();
- if (randomBoolean()) {
- searchRequest.indices(generateRandomStringArray(10, 10, false, false));
- }
- if (randomBoolean()) {
- searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
- }
- if (randomBoolean()) {
- searchRequest.types(generateRandomStringArray(10, 10, false, false));
- }
- if (randomBoolean()) {
- searchRequest.preference(randomAsciiOfLengthBetween(3, 10));
- }
- if (randomBoolean()) {
- searchRequest.requestCache(randomBoolean());
- }
- if (randomBoolean()) {
- searchRequest.routing(randomAsciiOfLengthBetween(3, 10));
- }
- if (randomBoolean()) {
- searchRequest.scroll(randomPositiveTimeValue());
- }
- if (randomBoolean()) {
- searchRequest.searchType(randomFrom(SearchType.values()));
- }
- if (randomBoolean()) {
- searchRequest.source(createSearchSourceBuilder());
- }
- return searchRequest;
- }
-
private static SearchRequest copyRequest(SearchRequest searchRequest) throws IOException {
SearchRequest result = new SearchRequest();
result.indices(searchRequest.indices());
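With createSearchRequest() moved into AbstractSearchTestCase, a subclass serialization test reduces to a round trip through the shared registry. A minimal sketch, assuming the base class exposes a namedWriteableRegistry field (the field name is an assumption; SearchRequest was still Streamable at this point, hence readFrom, and whether it overrides equals() is not visible in this diff, so one field is compared directly):

    public void testSerializationRoundTrip() throws IOException {
        SearchRequest searchRequest = createSearchRequest();
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            searchRequest.writeTo(output);
            try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
                SearchRequest deserialized = new SearchRequest();
                deserialized.readFrom(in);
                // compared field by field here; indices shown as one example
                assertArrayEquals(searchRequest.indices(), deserialized.indices());
            }
        }
    }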
diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
index fba71499cc..7de8f6a498 100644
--- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -18,10 +18,15 @@
*/
package org.elasticsearch.search;
-
+import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.Query;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -34,12 +39,19 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.ShardFetchRequest;
+import org.elasticsearch.search.internal.ShardSearchLocalRequest;
+import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
import static java.util.Collections.singletonList;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
@@ -112,6 +124,75 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
assertEquals(activeRefs, indexShard.store().refCount());
}
+ public void testSearchWhileIndexDeleted() throws IOException, InterruptedException {
+ createIndex("index");
+ client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
+
+ SearchService service = getInstanceFromNode(SearchService.class);
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+ IndexShard indexShard = indexService.getShard(0);
+ AtomicBoolean running = new AtomicBoolean(true);
+ CountDownLatch startGun = new CountDownLatch(1);
+ Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
+ final Thread thread = new Thread() {
+ @Override
+ public void run() {
+ startGun.countDown();
+ while (running.get()) {
+ service.afterIndexDeleted(indexService.index(), indexService.getIndexSettings().getSettings());
+ if (randomBoolean()) {
+ // here we trigger some refreshes to ensure the index readers go out of scope, so that we hit an
+ // AlreadyClosedException (ACE) if we access a search context in a non-sane way.
+ try {
+ semaphore.acquire();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ client().prepareIndex("index", "type").setSource("field", "value")
+ .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse indexResponse) {
+ semaphore.release();
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ semaphore.release();
+ }
+ });
+ }
+ }
+ }
+ };
+ thread.start();
+ startGun.await();
+ try {
+ final int rounds = scaledRandomIntBetween(100, 10000);
+ for (int i = 0; i < rounds; i++) {
+ try {
+ QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase(
+ new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
+ new SearchSourceBuilder(), new String[0], false));
+ IntArrayList intCursors = new IntArrayList(1);
+ intCursors.add(0);
+ ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null /* not a scroll */);
+ service.executeFetchPhase(req);
+ } catch (AlreadyClosedException ex) {
+ throw ex;
+ } catch (IllegalStateException ex) {
+ assertEquals("search context is already closed can't increment refCount current count [0]", ex.getMessage());
+ } catch (SearchContextMissingException ex) {
+ // that's fine
+ }
+ }
+ } finally {
+ running.set(false);
+ thread.join();
+ semaphore.acquire(Integer.MAX_VALUE);
+ }
+ }
+
public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin {
@Override
public List<QuerySpec<?>> getQueries() {
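In testSearchWhileIndexDeleted above, the Semaphore doubles as an in-flight counter: the indexing thread takes one permit before each asynchronous index request and the listener returns it on response or failure, so acquiring all Integer.MAX_VALUE permits in the finally block waits for every outstanding request to finish. The same drain pattern in isolation, as a sketch:

    import java.util.concurrent.Executor;
    import java.util.concurrent.Semaphore;

    class InFlightTracker {
        private final Semaphore inFlight = new Semaphore(Integer.MAX_VALUE);

        void submit(Runnable asyncOp, Executor executor) throws InterruptedException {
            inFlight.acquire();                  // one permit per outstanding operation
            executor.execute(() -> {
                try {
                    asyncOp.run();
                } finally {
                    inFlight.release();          // always return the permit, even on failure
                }
            });
        }

        void awaitAllDone() throws InterruptedException {
            inFlight.acquire(Integer.MAX_VALUE); // blocks until no operation is in flight
            inFlight.release(Integer.MAX_VALUE); // hand the permits back for reuse
        }
    }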
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java
index a7476381b8..1f453aa40f 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java
@@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
+import org.elasticsearch.search.SearchExtRegistry;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.index.query.QueryParseContext;
@@ -74,78 +75,33 @@ import static org.hamcrest.Matchers.containsString;
public class AggregatorParsingTests extends ESTestCase {
- private static Injector injector;
- private static Index index;
+ private String[] currentTypes;
- private static String[] currentTypes;
-
- protected static String[] getCurrentTypes() {
+ protected String[] getCurrentTypes() {
return currentTypes;
}
- private static NamedWriteableRegistry namedWriteableRegistry;
-
- protected static AggregatorParsers aggParsers;
- protected static IndicesQueriesRegistry queriesRegistry;
- protected static ParseFieldMatcher parseFieldMatcher;
+ protected AggregatorParsers aggParsers;
+ protected IndicesQueriesRegistry queriesRegistry;
+ protected ParseFieldMatcher parseFieldMatcher;
/**
* Setup for the whole base test class.
*/
- @BeforeClass
- public static void init() throws IOException {
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
// we have to prefer CURRENT since with the range of versions we support
// it's rather unlikely to get the current actually.
- Version version = randomBoolean() ? Version.CURRENT
- : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT);
Settings settings = Settings.builder().put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false).build();
-
- index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
- Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- final ThreadPool threadPool = new ThreadPool(settings);
- final ClusterService clusterService = createClusterService(threadPool);
- setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder()
- .put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0))));
- ScriptModule scriptModule = newTestScriptModule();
- List<Setting<?>> scriptSettings = scriptModule.getSettings();
- scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
- SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
-
- IndicesModule indicesModule = new IndicesModule(Collections.emptyList()) {
- @Override
- protected void configure() {
- bindMapperExtension();
- }
- };
- SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
- @Override
- protected void configureSearch() {
- // Skip me
- }
- };
+ IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
+ SearchModule searchModule = new SearchModule(settings, false, emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
- namedWriteableRegistry = new NamedWriteableRegistry(entries);
- injector = new ModulesBuilder().add(
- (b) -> {
- b.bind(Environment.class).toInstance(new Environment(settings));
- b.bind(ThreadPool.class).toInstance(threadPool);
- b.bind(ScriptService.class).toInstance(scriptModule.getScriptService());
- },
- settingsModule, indicesModule, searchModule,
- new IndexSettingsModule(index, settings),
- new AbstractModule() {
- @Override
- protected void configure() {
- bind(ClusterService.class).toInstance(clusterService);
- bind(CircuitBreakerService.class).toInstance(new NoneCircuitBreakerService());
- bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
- }
- }).createInjector();
- aggParsers = injector.getInstance(SearchRequestParsers.class).aggParsers;
+ aggParsers = searchModule.getSearchRequestParsers().aggParsers;
// create some random type with some default field, those types will
// stick around for all of the subclasses
currentTypes = new String[randomIntBetween(0, 5)];
@@ -153,21 +109,10 @@ public class AggregatorParsingTests extends ESTestCase {
String type = randomAsciiOfLengthBetween(1, 10);
currentTypes[i] = type;
}
- queriesRegistry = injector.getInstance(IndicesQueriesRegistry.class);
+ queriesRegistry = searchModule.getQueryParserRegistry();
parseFieldMatcher = ParseFieldMatcher.STRICT;
}
- @AfterClass
- public static void afterClass() throws Exception {
- injector.getInstance(ClusterService.class).close();
- terminate(injector.getInstance(ThreadPool.class));
- injector = null;
- index = null;
- aggParsers = null;
- currentTypes = null;
- namedWriteableRegistry = null;
- }
-
public void testTwoTypes() throws Exception {
String source = JsonXContent.contentBuilder()
.startObject()
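The setUp above replaces the static Guice injector with direct SearchModule accessors, so every test method gets freshly built parsers. Condensed, the new wiring is (all calls taken from this diff):

    Settings settings = Settings.builder()
            .put("node.name", AbstractQueryTestCase.class.toString())
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
            .build();
    IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
    SearchModule searchModule = new SearchModule(settings, false, emptyList());
    List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
    entries.addAll(indicesModule.getNamedWriteables());
    entries.addAll(searchModule.getNamedWriteables());
    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
    // module accessors replace the former injector.getInstance(...) lookups
    IndicesQueriesRegistry queriesRegistry = searchModule.getQueryParserRegistry();
    AggregatorParsers aggParsers = searchModule.getSearchRequestParsers().aggParsers;

The same pattern repeats in BaseAggregationTestCase and BasePipelineAggregationTestCase below.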
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
index b9c0e5f09c..ca5b98af30 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
@@ -19,46 +19,24 @@
package org.elasticsearch.search.aggregations;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.inject.Injector;
-import org.elasticsearch.common.inject.ModulesBuilder;
-import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.IndicesModule;
-import org.elasticsearch.indices.breaker.CircuitBreakerService;
-import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.script.ScriptModule;
-import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.IndexSettingsModule;
-import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.VersionUtils;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
@@ -66,8 +44,6 @@ import java.util.Collections;
import java.util.List;
import static java.util.Collections.emptyList;
-import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
-import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.equalTo;
public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuilder<AB>> extends ESTestCase {
@@ -78,104 +54,45 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean";
protected static final String DATE_FIELD_NAME = "mapped_date";
protected static final String IP_FIELD_NAME = "mapped_ip";
- protected static final String OBJECT_FIELD_NAME = "mapped_object";
- protected static final String[] mappedFieldNames = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME,
- DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, IP_FIELD_NAME, OBJECT_FIELD_NAME};
- private static Injector injector;
- private static Index index;
+ private String[] currentTypes;
- private static String[] currentTypes;
-
- protected static String[] getCurrentTypes() {
+ protected String[] getCurrentTypes() {
return currentTypes;
}
- private static NamedWriteableRegistry namedWriteableRegistry;
+ private NamedWriteableRegistry namedWriteableRegistry;
- protected static AggregatorParsers aggParsers;
- protected static IndicesQueriesRegistry queriesRegistry;
- protected static ParseFieldMatcher parseFieldMatcher;
+ protected AggregatorParsers aggParsers;
+ protected IndicesQueriesRegistry queriesRegistry;
+ protected ParseFieldMatcher parseFieldMatcher;
protected abstract AB createTestAggregatorBuilder();
/**
* Setup for the whole base test class.
*/
- @BeforeClass
- public static void init() throws IOException {
- index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
- injector = buildInjector(index);
- namedWriteableRegistry = injector.getInstance(NamedWriteableRegistry.class);
- aggParsers = injector.getInstance(SearchRequestParsers.class).aggParsers;
- //create some random type with some default field, those types will stick around for all of the subclasses
- currentTypes = new String[randomIntBetween(0, 5)];
- for (int i = 0; i < currentTypes.length; i++) {
- String type = randomAsciiOfLengthBetween(1, 10);
- currentTypes[i] = type;
- }
- queriesRegistry = injector.getInstance(IndicesQueriesRegistry.class);
- parseFieldMatcher = ParseFieldMatcher.STRICT;
- }
-
- public static final Injector buildInjector(Index index) {
- // we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually.
- Version version = randomBoolean() ? Version.CURRENT
- : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT);
+ public void setUp() throws Exception {
+ super.setUp();
Settings settings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
- .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false)
.build();
-
- Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- final ThreadPool threadPool = new ThreadPool(settings);
- final ClusterService clusterService = createClusterService(threadPool);
- setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder()
- .put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0))));
- ScriptModule scriptModule = newTestScriptModule();
- List<Setting<?>> scriptSettings = scriptModule.getSettings();
- scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
- SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
- IndicesModule indicesModule = new IndicesModule(Collections.emptyList()) {
- @Override
- protected void configure() {
- bindMapperExtension();
- }
- };
- SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
- @Override
- protected void configureSearch() {
- // Skip me
- }
- };
+ IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
+ SearchModule searchModule = new SearchModule(settings, false, emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
- NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
- return new ModulesBuilder().add(
- (b) -> {
- b.bind(Environment.class).toInstance(new Environment(settings));
- b.bind(ThreadPool.class).toInstance(threadPool);
- b.bind(ScriptService.class).toInstance(scriptModule.getScriptService());
- b.bind(ClusterService.class).toProvider(Providers.of(clusterService));
- b.bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
- b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
- },
- settingsModule, indicesModule, searchModule, new IndexSettingsModule(index, settings)
- ).createInjector();
- }
-
-
- @AfterClass
- public static void afterClass() throws Exception {
- injector.getInstance(ClusterService.class).close();
- terminate(injector.getInstance(ThreadPool.class));
- injector = null;
- index = null;
- aggParsers = null;
- currentTypes = null;
- namedWriteableRegistry = null;
+ namedWriteableRegistry = new NamedWriteableRegistry(entries);
+ queriesRegistry = searchModule.getQueryParserRegistry();
+ aggParsers = searchModule.getSearchRequestParsers().aggParsers;
+ //create some random type with some default field, those types will stick around for all of the subclasses
+ currentTypes = new String[randomIntBetween(0, 5)];
+ for (int i = 0; i < currentTypes.length; i++) {
+ String type = randomAsciiOfLengthBetween(1, 10);
+ currentTypes[i] = type;
+ }
+ parseFieldMatcher = ParseFieldMatcher.STRICT;
}
/**
@@ -268,24 +185,6 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
}
}
- protected String[] getRandomTypes() {
- String[] types;
- if (currentTypes.length > 0 && randomBoolean()) {
- int numberOfQueryTypes = randomIntBetween(1, currentTypes.length);
- types = new String[numberOfQueryTypes];
- for (int i = 0; i < numberOfQueryTypes; i++) {
- types[i] = randomFrom(currentTypes);
- }
- } else {
- if (randomBoolean()) {
- types = new String[]{MetaData.ALL};
- } else {
- types = new String[0];
- }
- }
- return types;
- }
-
public String randomNumericField() {
int randomInt = randomInt(3);
switch (randomInt) {
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java
index b0952b2de6..0f4c539c79 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java
@@ -20,29 +20,32 @@
package org.elasticsearch.search.aggregations;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.Index;
+import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.search.SearchRequestParsers;
+import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
+import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static java.util.Collections.emptyList;
import static org.hamcrest.Matchers.equalTo;
public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelineAggregationBuilder<AF>> extends ESTestCase {
@@ -53,59 +56,51 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean";
protected static final String DATE_FIELD_NAME = "mapped_date";
- private static Injector injector;
- private static Index index;
-
- private static String[] currentTypes;
+ private String[] currentTypes;
- protected static String[] getCurrentTypes() {
+ protected String[] getCurrentTypes() {
return currentTypes;
}
- private static NamedWriteableRegistry namedWriteableRegistry;
+ private NamedWriteableRegistry namedWriteableRegistry;
- protected static AggregatorParsers aggParsers;
- protected static ParseFieldMatcher parseFieldMatcher;
- protected static IndicesQueriesRegistry queriesRegistry;
+ protected AggregatorParsers aggParsers;
+ protected IndicesQueriesRegistry queriesRegistry;
+ protected ParseFieldMatcher parseFieldMatcher;
protected abstract AF createTestAggregatorFactory();
/**
* Setup for the whole base test class.
*/
- @BeforeClass
- public static void init() throws IOException {
- index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
- injector = BaseAggregationTestCase.buildInjector(index);
- namedWriteableRegistry = injector.getInstance(NamedWriteableRegistry.class);
- aggParsers = injector.getInstance(SearchRequestParsers.class).aggParsers;
+ public void setUp() throws Exception {
+ super.setUp();
+ Settings settings = Settings.builder()
+ .put("node.name", AbstractQueryTestCase.class.toString())
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
+ .build();
+ IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
+ SearchModule searchModule = new SearchModule(settings, false, emptyList());
+ List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
+ entries.addAll(indicesModule.getNamedWriteables());
+ entries.addAll(searchModule.getNamedWriteables());
+ namedWriteableRegistry = new NamedWriteableRegistry(entries);
+ queriesRegistry = searchModule.getQueryParserRegistry();
+ aggParsers = searchModule.getSearchRequestParsers().aggParsers;
//create some random type with some default field, those types will stick around for all of the subclasses
currentTypes = new String[randomIntBetween(0, 5)];
for (int i = 0; i < currentTypes.length; i++) {
String type = randomAsciiOfLengthBetween(1, 10);
currentTypes[i] = type;
}
- queriesRegistry = injector.getInstance(IndicesQueriesRegistry.class);
parseFieldMatcher = ParseFieldMatcher.STRICT;
}
- @AfterClass
- public static void afterClass() throws Exception {
- injector.getInstance(ClusterService.class).close();
- terminate(injector.getInstance(ThreadPool.class));
- injector = null;
- index = null;
- aggParsers = null;
- currentTypes = null;
- namedWriteableRegistry = null;
- }
-
/**
* Generic test that creates new AggregatorFactory from the test
* AggregatorFactory and checks both for equality and asserts equality on
* the two queries.
*/
-
public void testFromXContent() throws IOException {
AF testAgg = createTestAggregatorFactory();
AggregatorFactories.Builder factoriesBuilder = AggregatorFactories.builder().skipResolveOrder().addPipelineAggregator(testAgg);
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
index e38f33cfa2..cfb290284a 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
@@ -78,7 +78,7 @@ public class BooleanTermsIT extends ESIntegTestCase {
builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
.startObject()
.field(SINGLE_VALUED_FIELD_NAME, singleValue)
- .field(MULTI_VALUED_FIELD_NAME, multiValue)
+ .array(MULTI_VALUED_FIELD_NAME, multiValue)
.endObject());
}
indexRandom(true, builders);
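The change from field(...) to array(...) applies to every multi-valued field in these builders: XContentBuilder.array makes the JSON array shape explicit instead of relying on an overloaded field(...) call. A minimal sketch of the difference, using jsonBuilder() as in the test:

    XContentBuilder doc = jsonBuilder()
            .startObject()
            .field("single", "s0")          // {"single": "s0"}
            .array("multi", "s1", "s2")     // {"multi": ["s1", "s2"]}
            .endObject();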
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
index c8b7aa6ad5..51d702e354 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
@@ -468,7 +468,7 @@ public class NestedIT extends ESIntegTestCase {
client().prepareIndex("idx4", "product", "1").setSource(jsonBuilder().startObject()
.field("name", "product1")
- .field("categories", "1", "2", "3", "4")
+ .array("categories", "1", "2", "3", "4")
.startArray("property")
.startObject().field("id", 1).endObject()
.startObject().field("id", 2).endObject()
@@ -477,7 +477,7 @@ public class NestedIT extends ESIntegTestCase {
.endObject()).get();
client().prepareIndex("idx4", "product", "2").setSource(jsonBuilder().startObject()
.field("name", "product2")
- .field("categories", "1", "2")
+ .array("categories", "1", "2")
.startArray("property")
.startObject().field("id", 1).endObject()
.startObject().field("id", 5).endObject()
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java
index fbd69755f2..b30dca1c9a 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java
@@ -103,7 +103,7 @@ public abstract class ShardSizeTestCase extends ESIntegTestCase {
protected List<IndexRequestBuilder> indexDoc(String shard, String key, int times) throws Exception {
IndexRequestBuilder[] builders = new IndexRequestBuilder[times];
for (int i = 0; i < times; i++) {
- builders[i] = client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ builders[i] = client().prepareIndex("idx", "type").setRouting(shard).setSource(jsonBuilder()
.startObject()
.field("key", key)
.field("value", 1)
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
index 2d9d5ca043..fab1f8b7d3 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
@@ -20,12 +20,10 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.plugins.Plugin;
@@ -49,6 +47,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Signi
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods;
@@ -172,7 +171,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
@Override
public List<SearchExtensionSpec<SignificanceHeuristic, SignificanceHeuristicParser>> getSignificanceHeuristics() {
return singletonList(new SearchExtensionSpec<SignificanceHeuristic, SignificanceHeuristicParser>(SimpleHeuristic.NAME,
- SimpleHeuristic::new, SimpleHeuristic::parse));
+ SimpleHeuristic::new, (context) -> SimpleHeuristic.parse(context)));
}
@Override
@@ -239,9 +238,9 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
return subsetFreq / subsetSize > supersetFreq / supersetSize ? 2.0 : 1.0;
}
- public static SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
+ public static SignificanceHeuristic parse(XContentParseContext context)
throws IOException, QueryShardException {
- parser.nextToken();
+ context.getParser().nextToken();
return new SimpleHeuristic();
}
}
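The significance-heuristic parsing contract changed from an (XContentParser, ParseFieldMatcher) pair to a single XContentParseContext that bundles both, which is why the registration above switched to a lambda. A parser adapted to the new contract, mirroring SimpleHeuristic.parse from this diff:

    public static SignificanceHeuristic parse(XContentParseContext context) throws IOException, QueryShardException {
        XContentParser parser = context.getParser();
        parser.nextToken();           // this heuristic has an empty body; just consume the token
        return new SimpleHeuristic();
    }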
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java
index 5cc6ec5863..7cca4baade 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java
@@ -74,7 +74,7 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
public void setupSuiteScopeCluster() throws Exception {
createIndex(UNMAPPED_IDX_NAME);
assertAcked(prepareCreate(IDX_NAME)
- .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point,geohash_prefix=true,geohash_precision=12",
+ .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point",
MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=keyword"));
singleTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java
index 594eba7ddb..cff1fa746d 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java
@@ -134,11 +134,11 @@ public class CardinalityIT extends ESIntegTestCase {
builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
.startObject()
.field("str_value", "s" + i)
- .field("str_values", new String[]{"s" + (i * 2), "s" + (i * 2 + 1)})
+ .array("str_values", new String[]{"s" + (i * 2), "s" + (i * 2 + 1)})
.field("l_value", i)
- .field("l_values", new int[] {i * 2, i * 2 + 1})
+ .array("l_values", new int[] {i * 2, i * 2 + 1})
.field("d_value", i)
- .field("d_values", new double[]{i * 2, i * 2 + 1})
+ .array("d_values", new double[]{i * 2, i * 2 + 1})
.endObject());
}
indexRandom(true, builders);
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java
index 4e46a0b6a6..510df4c572 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.search.aggregations.metrics;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.plugins.Plugin;
@@ -477,7 +479,7 @@ public class StatsIT extends AbstractNumericTestCase {
ShardSearchFailure[] failures = response.getShardFailures();
if (failures.length != expectedFailures) {
for (ShardSearchFailure failure : failures) {
- logger.error("Shard Failure: {}", failure.getCause(), failure);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause());
}
fail("Unexpected shard failures!");
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
index f535054491..45d44c863a 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
@@ -219,7 +219,7 @@ public class TopHitsIT extends ESIntegTestCase {
builder.endArray().endObject();
builders.add(
- client().prepareIndex("articles", "article").setCreate(true).setSource(builder)
+ client().prepareIndex("articles", "article").setSource(builder)
);
}
@@ -580,7 +580,7 @@ public class TopHitsIT extends ESIntegTestCase {
topHits("hits").size(1)
.highlighter(new HighlightBuilder().field("text"))
.explain(true)
- .field("text")
+ .storedField("text")
.fieldDataField("field1")
.scriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap()))
.fetchSource("text", null)
@@ -956,4 +956,41 @@ public class TopHitsIT extends ESIntegTestCase {
.get();
assertNoFailures(response);
}
+
+ public void testNoStoredFields() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .subAggregation(
+ topHits("hits").storedField("_none_")
+ )
+ )
+ .get();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(10L));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(10L));
+ assertThat(hits.getHits().length, equalTo(3));
+ for (SearchHit hit : hits) {
+ assertThat(hit.source(), nullValue());
+ assertThat(hit.id(), nullValue());
+ assertThat(hit.type(), nullValue());
+ }
+ }
+ }
}
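Requesting the stored field "_none_" switches off stored-field loading entirely, which is why the assertions above expect null source, id, and type on every hit. The same flag works on a plain search request; a sketch, assuming the 5.x SearchRequestBuilder#storedFields method:

    SearchResponse response = client().prepareSearch("idx")
            .setTypes("type")
            .storedFields("_none_")   // hits come back without _source, _id, or _type
            .get();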
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java
index 72f961963e..3f2b4c4462 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java
@@ -34,6 +34,7 @@ import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import static org.hamcrest.Matchers.containsString;
@@ -58,13 +59,25 @@ public class TopHitsTests extends BaseAggregationTestCase<TopHitsAggregationBuil
if (randomBoolean()) {
factory.trackScores(randomBoolean());
}
- if (randomBoolean()) {
- int fieldsSize = randomInt(25);
- List<String> fields = new ArrayList<>(fieldsSize);
- for (int i = 0; i < fieldsSize; i++) {
- fields.add(randomAsciiOfLengthBetween(5, 50));
- }
- factory.fields(fields);
+ switch (randomInt(3)) {
+ case 0:
+ break;
+ case 1:
+ factory.storedField("_none_");
+ break;
+ case 2:
+ factory.storedFields(Collections.emptyList());
+ break;
+ case 3:
+ int fieldsSize = randomInt(25);
+ List<String> fields = new ArrayList<>(fieldsSize);
+ for (int i = 0; i < fieldsSize; i++) {
+ fields.add(randomAsciiOfLengthBetween(5, 50));
+ }
+ factory.storedFields(fields);
+ break;
+ default:
+ throw new IllegalStateException();
}
if (randomBoolean()) {
int fieldDataFieldsSize = randomInt(25);
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
index fdd50692e8..101e52fcad 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
@@ -28,9 +28,11 @@ import org.elasticsearch.script.MockScriptPlugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.test.ESIntegTestCase;
+import org.joda.time.DateTime;
import java.io.IOException;
import java.util.ArrayList;
@@ -42,6 +44,7 @@ import java.util.Map;
import java.util.function.Function;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript;
@@ -58,11 +61,13 @@ public class BucketScriptIT extends ESIntegTestCase {
private static final String FIELD_2_NAME = "field2";
private static final String FIELD_3_NAME = "field3";
private static final String FIELD_4_NAME = "field4";
+ private static final String FIELD_5_NAME = "field5";
private static int interval;
private static int numDocs;
private static int minNumber;
private static int maxNumber;
+ private static long date;
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
@@ -125,6 +130,7 @@ public class BucketScriptIT extends ESIntegTestCase {
numDocs = randomIntBetween(10, 500);
minNumber = -200;
maxNumber = 200;
+ date = randomLong();
List<IndexRequestBuilder> builders = new ArrayList<>();
for (int docs = 0; docs < numDocs; docs++) {
@@ -142,6 +148,7 @@ public class BucketScriptIT extends ESIntegTestCase {
jsonBuilder.field(FIELD_2_NAME, randomIntBetween(minNumber, maxNumber));
jsonBuilder.field(FIELD_3_NAME, randomIntBetween(minNumber, maxNumber));
jsonBuilder.field(FIELD_4_NAME, randomIntBetween(minNumber, maxNumber));
+ jsonBuilder.field(FIELD_5_NAME, date);
jsonBuilder.endObject();
return jsonBuilder;
}
@@ -238,6 +245,52 @@ public class BucketScriptIT extends ESIntegTestCase {
}
}
+ public void testInlineScriptWithDateRange() {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range")
+ .field(FIELD_5_NAME)
+ .addUnboundedFrom(date)
+ .subAggregation(sum("field2Sum").field(FIELD_2_NAME))
+ .subAggregation(sum("field3Sum").field(FIELD_3_NAME))
+ .subAggregation(sum("field4Sum").field(FIELD_4_NAME))
+ .subAggregation(
+ bucketScript("seriesArithmetic",
+ new Script("_value0 + _value1 + _value2", ScriptType.INLINE, CustomScriptPlugin.NAME, null)
+ , "field2Sum", "field3Sum", "field4Sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Range.Bucket> buckets = range.getBuckets();
+
+ for (int i = 0; i < buckets.size(); ++i) {
+ Range.Bucket bucket = buckets.get(i);
+ if (bucket.getDocCount() == 0) {
+ SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic");
+ assertThat(seriesArithmetic, nullValue());
+ } else {
+ Sum field2Sum = bucket.getAggregations().get("field2Sum");
+ assertThat(field2Sum, notNullValue());
+ double field2SumValue = field2Sum.getValue();
+ Sum field3Sum = bucket.getAggregations().get("field3Sum");
+ assertThat(field3Sum, notNullValue());
+ double field3SumValue = field3Sum.getValue();
+ Sum field4Sum = bucket.getAggregations().get("field4Sum");
+ assertThat(field4Sum, notNullValue());
+ double field4SumValue = field4Sum.getValue();
+ SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic");
+ assertThat(seriesArithmetic, notNullValue());
+ double seriesArithmeticValue = seriesArithmetic.value();
+ assertThat(seriesArithmeticValue, equalTo(field2SumValue + field3SumValue + field4SumValue));
+ }
+ }
+ }
+
public void testInlineScriptSingleVariable() {
SearchResponse response = client()
.prepareSearch("idx")
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
index faa960cb58..54316b8d79 100644
--- a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
@@ -113,7 +113,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase {
threads[j].join();
}
// this might time out on some machines if they are really busy and you hit lots of throttling
- ClusterHealthResponse resp = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForEvents(Priority.LANGUID).setTimeout("5m").get();
+ ClusterHealthResponse resp = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).setTimeout("5m").get();
assertNoTimeout(resp);
// if we hit only non-critical exceptions we make sure that the post search works
if (!nonCriticalExceptions.isEmpty()) {
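The cluster-health wait changed from a shard count to an explicit boolean, so setWaitForRelocatingShards(0) becomes setWaitForNoRelocatingShards(true); the same rename appears in TransportSearchFailuresIT below. The new call in isolation:

    ClusterHealthResponse resp = client().admin().cluster().prepareHealth()
            .setWaitForYellowStatus()
            .setWaitForNoRelocatingShards(true)   // replaces setWaitForRelocatingShards(0)
            .setWaitForEvents(Priority.LANGUID)
            .setTimeout("5m")
            .get();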
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java
index 07f696e491..61dd798f5e 100644
--- a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java
@@ -99,7 +99,7 @@ public class SearchWithRandomIOExceptionsIT extends ESIntegTestCase {
client().prepareIndex("test", "type", "init" + i).setSource("test", "init").get();
}
client().admin().indices().prepareRefresh("test").execute().get();
- client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute().get();
+ client().admin().indices().prepareFlush("test").execute().get();
client().admin().indices().prepareClose("test").execute().get();
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
.put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate)
diff --git a/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
index 76cfa610a6..9493ec048e 100644
--- a/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
+++ b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
@@ -86,7 +86,7 @@ public class TransportSearchFailuresIT extends ESIntegTestCase {
ClusterHealthResponse clusterHealth = client()
.admin()
.cluster()
- .health(clusterHealthRequest("test").waitForYellowStatus().waitForRelocatingShards(0)
+ .health(clusterHealthRequest("test").waitForYellowStatus().waitForNoRelocatingShards(true)
.waitForActiveShards(test.totalNumShards)).actionGet();
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
index b3cec09ddf..967af3d3af 100644
--- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
@@ -20,390 +20,34 @@
package org.elasticsearch.search.builder;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Injector;
-import org.elasticsearch.common.inject.ModulesBuilder;
-import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsModule;
-import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.indices.IndicesModule;
-import org.elasticsearch.indices.breaker.CircuitBreakerService;
-import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
-import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.script.Script;
-import org.elasticsearch.script.ScriptModule;
-import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.search.SearchRequestParsers;
-import org.elasticsearch.search.aggregations.AggregationBuilders;
-import org.elasticsearch.search.aggregations.AggregatorParsers;
-import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
-import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests;
-import org.elasticsearch.search.rescore.QueryRescoreBuilderTests;
+import org.elasticsearch.search.AbstractSearchTestCase;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
-import org.elasticsearch.search.searchafter.SearchAfterBuilder;
-import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
-import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType;
-import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
-import org.elasticsearch.search.suggest.SuggestBuilderTests;
-import org.elasticsearch.search.suggest.Suggesters;
-import org.elasticsearch.test.AbstractQueryTestCase;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.IndexSettingsModule;
-import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.VersionUtils;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import static java.util.Collections.emptyList;
-import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
-import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasToString;
-public class SearchSourceBuilderTests extends ESTestCase {
- private static Injector injector;
-
- private static NamedWriteableRegistry namedWriteableRegistry;
-
- private static SearchRequestParsers searchRequestParsers;
-
- private static Index index;
-
- private static String[] currentTypes;
-
- private static ParseFieldMatcher parseFieldMatcher;
-
- @BeforeClass
- public static void init() throws IOException {
- // we have to prefer CURRENT since with the range of versions we support
- // it's rather unlikely to get the current actually.
- Version version = randomBoolean() ? Version.CURRENT
- : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT);
- Settings settings = Settings.builder()
- .put("node.name", AbstractQueryTestCase.class.toString())
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
- .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false).build();
-
- index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
- Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- final ThreadPool threadPool = new ThreadPool(settings);
- final ClusterService clusterService = createClusterService(threadPool);
- setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder()
- .put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0))));
- ScriptModule scriptModule = newTestScriptModule();
- List<Setting<?>> scriptSettings = scriptModule.getSettings();
- scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
- SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
- IndicesModule indicesModule = new IndicesModule(Collections.emptyList()) {
- @Override
- protected void configure() {
- bindMapperExtension();
- }
- };
- SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
- @Override
- protected void configureSearch() {
- // Skip me
- }
- };
- List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
- entries.addAll(indicesModule.getNamedWriteables());
- entries.addAll(searchModule.getNamedWriteables());
- namedWriteableRegistry = new NamedWriteableRegistry(entries);
- injector = new ModulesBuilder().add(
- (b) -> {
- b.bind(Environment.class).toInstance(new Environment(settings));
- b.bind(ThreadPool.class).toInstance(threadPool);
- b.bind(ScriptService.class).toInstance(scriptModule.getScriptService());
- },
- settingsModule, indicesModule, searchModule,
- new IndexSettingsModule(index, settings),
- new AbstractModule() {
- @Override
- protected void configure() {
- bind(ClusterService.class).toProvider(Providers.of(clusterService));
- bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
- bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
- }
- }
- ).createInjector();
- searchRequestParsers = injector.getInstance(SearchRequestParsers.class);
- // create some random type with some default field, those types will
- // stick around for all of the subclasses
- currentTypes = new String[randomIntBetween(0, 5)];
- for (int i = 0; i < currentTypes.length; i++) {
- String type = randomAsciiOfLengthBetween(1, 10);
- currentTypes[i] = type;
- }
- parseFieldMatcher = ParseFieldMatcher.STRICT;
- }
-
- @AfterClass
- public static void afterClass() throws Exception {
- injector.getInstance(ClusterService.class).close();
- terminate(injector.getInstance(ThreadPool.class));
- injector = null;
- index = null;
- searchRequestParsers = null;
- currentTypes = null;
- namedWriteableRegistry = null;
- }
-
- public static SearchSourceBuilder createSearchSourceBuilder() throws IOException {
- SearchSourceBuilder builder = new SearchSourceBuilder();
- if (randomBoolean()) {
- builder.from(randomIntBetween(0, 10000));
- }
- if (randomBoolean()) {
- builder.size(randomIntBetween(0, 10000));
- }
- if (randomBoolean()) {
- builder.explain(randomBoolean());
- }
- if (randomBoolean()) {
- builder.version(randomBoolean());
- }
- if (randomBoolean()) {
- builder.trackScores(randomBoolean());
- }
- if (randomBoolean()) {
- builder.minScore(randomFloat() * 1000);
- }
- if (randomBoolean()) {
- builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout"));
- }
- if (randomBoolean()) {
- builder.terminateAfter(randomIntBetween(1, 100000));
- }
- // if (randomBoolean()) {
- // builder.defaultRescoreWindowSize(randomIntBetween(1, 100));
- // }
- if (randomBoolean()) {
- int fieldsSize = randomInt(25);
- List<String> fields = new ArrayList<>(fieldsSize);
- for (int i = 0; i < fieldsSize; i++) {
- fields.add(randomAsciiOfLengthBetween(5, 50));
- }
- builder.storedFields(fields);
- }
- if (randomBoolean()) {
- int fieldDataFieldsSize = randomInt(25);
- for (int i = 0; i < fieldDataFieldsSize; i++) {
- builder.docValueField(randomAsciiOfLengthBetween(5, 50));
- }
- }
- if (randomBoolean()) {
- int scriptFieldsSize = randomInt(25);
- for (int i = 0; i < scriptFieldsSize; i++) {
- if (randomBoolean()) {
- builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean());
- } else {
- builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"));
- }
- }
- }
- if (randomBoolean()) {
- FetchSourceContext fetchSourceContext;
- int branch = randomInt(5);
- String[] includes = new String[randomIntBetween(0, 20)];
- for (int i = 0; i < includes.length; i++) {
- includes[i] = randomAsciiOfLengthBetween(5, 20);
- }
- String[] excludes = new String[randomIntBetween(0, 20)];
- for (int i = 0; i < excludes.length; i++) {
- excludes[i] = randomAsciiOfLengthBetween(5, 20);
- }
- switch (branch) {
- case 0:
- fetchSourceContext = new FetchSourceContext(randomBoolean());
- break;
- case 1:
- fetchSourceContext = new FetchSourceContext(includes, excludes);
- break;
- case 2:
- fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20));
- break;
- case 3:
- fetchSourceContext = new FetchSourceContext(true, includes, excludes);
- break;
- case 4:
- fetchSourceContext = new FetchSourceContext(includes);
- break;
- case 5:
- fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20));
- break;
- default:
- throw new IllegalStateException();
- }
- builder.fetchSource(fetchSourceContext);
- }
- if (randomBoolean()) {
- int size = randomIntBetween(0, 20);
- List<String> statsGroups = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- statsGroups.add(randomAsciiOfLengthBetween(5, 20));
- }
- builder.stats(statsGroups);
- }
- if (randomBoolean()) {
- int indexBoostSize = randomIntBetween(1, 10);
- for (int i = 0; i < indexBoostSize; i++) {
- builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10);
- }
- }
- if (randomBoolean()) {
- builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
- }
- if (randomBoolean()) {
- builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
- }
- if (randomBoolean()) {
- int numSorts = randomIntBetween(1, 5);
- for (int i = 0; i < numSorts; i++) {
- int branch = randomInt(5);
- switch (branch) {
- case 0:
- builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
- break;
- case 1:
- builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20),
- AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values())));
- break;
- case 2:
- builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values())));
- break;
- case 3:
- builder.sort(SortBuilders.scriptSort(new Script("foo"),
- ScriptSortType.NUMBER).order(randomFrom(SortOrder.values())));
- break;
- case 4:
- builder.sort(randomAsciiOfLengthBetween(5, 20));
- break;
- case 5:
- builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values()));
- break;
- }
- }
- }
-
- if (randomBoolean()) {
- int numSearchFrom = randomIntBetween(1, 5);
- // We build a json version of the search_from first in order to
- // ensure that every number type remain the same before/after xcontent (de)serialization.
- // This is not a problem because the final type of each field value is extracted from associated sort field.
- // This little trick ensure that equals and hashcode are the same when using the xcontent serialization.
- XContentBuilder jsonBuilder = XContentFactory.jsonBuilder();
- jsonBuilder.startObject();
- jsonBuilder.startArray("search_from");
- for (int i = 0; i < numSearchFrom; i++) {
- int branch = randomInt(8);
- switch (branch) {
- case 0:
- jsonBuilder.value(randomInt());
- break;
- case 1:
- jsonBuilder.value(randomFloat());
- break;
- case 2:
- jsonBuilder.value(randomLong());
- break;
- case 3:
- jsonBuilder.value(randomDouble());
- break;
- case 4:
- jsonBuilder.value(randomAsciiOfLengthBetween(5, 20));
- break;
- case 5:
- jsonBuilder.value(randomBoolean());
- break;
- case 6:
- jsonBuilder.value(randomByte());
- break;
- case 7:
- jsonBuilder.value(randomShort());
- break;
- case 8:
- jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20)));
- break;
- }
- }
- jsonBuilder.endArray();
- jsonBuilder.endObject();
- XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes());
- parser.nextToken();
- parser.nextToken();
- parser.nextToken();
- builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues());
- }
- if (randomBoolean()) {
- builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder());
- }
- if (randomBoolean()) {
- builder.suggest(SuggestBuilderTests.randomSuggestBuilder());
- }
- if (randomBoolean()) {
- int numRescores = randomIntBetween(1, 5);
- for (int i = 0; i < numRescores; i++) {
- builder.addRescorer(QueryRescoreBuilderTests.randomRescoreBuilder());
- }
- }
- if (randomBoolean()) {
- builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20)));
- }
- if (randomBoolean()) {
- XContentBuilder xContentBuilder = XContentFactory.jsonBuilder();
- xContentBuilder.startObject();
- xContentBuilder.field("term_vectors_fetch", randomAsciiOfLengthBetween(5, 20));
- xContentBuilder.endObject();
- builder.ext(xContentBuilder);
- }
- if (randomBoolean()) {
- String field = randomBoolean() ? null : randomAsciiOfLengthBetween(5, 20);
- int max = between(2, 1000);
- int id = randomInt(max-1);
- if (field == null) {
- builder.slice(new SliceBuilder(id, max));
- } else {
- builder.slice(new SliceBuilder(field, id, max));
- }
- }
- return builder;
- }
+public class SearchSourceBuilderTests extends AbstractSearchTestCase {
public void testFromXContent() throws IOException {
SearchSourceBuilder testSearchSourceBuilder = createSearchSourceBuilder();
@@ -415,11 +59,11 @@ public class SearchSourceBuilderTests extends ESTestCase {
assertParseSearchSource(testSearchSourceBuilder, builder.bytes());
}
- private static void assertParseSearchSource(SearchSourceBuilder testBuilder, BytesReference searchSourceAsBytes) throws IOException {
+ private void assertParseSearchSource(SearchSourceBuilder testBuilder, BytesReference searchSourceAsBytes) throws IOException {
assertParseSearchSource(testBuilder, searchSourceAsBytes, ParseFieldMatcher.STRICT);
}
- private static void assertParseSearchSource(SearchSourceBuilder testBuilder, BytesReference searchSourceAsBytes, ParseFieldMatcher pfm)
+ private void assertParseSearchSource(SearchSourceBuilder testBuilder, BytesReference searchSourceAsBytes, ParseFieldMatcher pfm)
throws IOException {
XContentParser parser = XContentFactory.xContent(searchSourceAsBytes).createParser(searchSourceAsBytes);
QueryParseContext parseContext = new QueryParseContext(searchRequestParsers.queryParsers, parser, pfm);
@@ -428,15 +72,14 @@ public class SearchSourceBuilderTests extends ESTestCase {
// test the embedded case
}
SearchSourceBuilder newBuilder = SearchSourceBuilder.fromXContent(parseContext, searchRequestParsers.aggParsers,
- searchRequestParsers.suggesters);
+ searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertNull(parser.nextToken());
assertEquals(testBuilder, newBuilder);
assertEquals(testBuilder.hashCode(), newBuilder.hashCode());
}
- private static QueryParseContext createParseContext(XContentParser parser) {
- QueryParseContext context = new QueryParseContext(searchRequestParsers.queryParsers, parser, parseFieldMatcher);
- return context;
+ private QueryParseContext createParseContext(XContentParser parser) {
+ return new QueryParseContext(searchRequestParsers.queryParsers, parser, ParseFieldMatcher.STRICT);
}
public void testSerialization() throws IOException {
@@ -480,7 +123,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
}
//we use the streaming infra to create a copy of the builder provided as argument
- protected static SearchSourceBuilder copyBuilder(SearchSourceBuilder builder) throws IOException {
+ private SearchSourceBuilder copyBuilder(SearchSourceBuilder builder) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
builder.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
@@ -494,7 +137,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
String restContent = " { \"_source\": { \"includes\": \"include\", \"excludes\": \"*.field2\"}}";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertArrayEquals(new String[]{"*.field2"}, searchSourceBuilder.fetchSource().excludes());
assertArrayEquals(new String[]{"include"}, searchSourceBuilder.fetchSource().includes());
}
@@ -503,7 +146,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
String restContent = " { \"_source\": false}";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertArrayEquals(new String[]{}, searchSourceBuilder.fetchSource().excludes());
assertArrayEquals(new String[]{}, searchSourceBuilder.fetchSource().includes());
assertFalse(searchSourceBuilder.fetchSource().fetchSource());
@@ -511,12 +154,32 @@ public class SearchSourceBuilderTests extends ESTestCase {
}
}
+ public void testMultipleQueryObjectsAreRejected() throws Exception {
+ String restContent =
+ " { \"query\": {\n" +
+ " \"multi_match\": {\n" +
+ " \"query\": \"workd\",\n" +
+ " \"fields\": [\"title^5\", \"plain_body\"]\n" +
+ " },\n" +
+ " \"filters\": {\n" +
+ " \"terms\": {\n" +
+ " \"status\": [ 3 ]\n" +
+ " }\n" +
+ " }\n" +
+ " } }";
+ try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
+ ParsingException e = expectThrows(ParsingException.class, () -> SearchSourceBuilder.fromXContent(createParseContext(parser),
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers));
+ assertEquals("[multi_match] malformed query, expected [END_OBJECT] but found [FIELD_NAME]", e.getMessage());
+ }
+ }
+
public void testParseSort() throws IOException {
{
String restContent = " { \"sort\": \"foo\"}";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertEquals(1, searchSourceBuilder.sorts().size());
assertEquals(new FieldSortBuilder("foo"), searchSourceBuilder.sorts().get(0));
}
@@ -532,7 +195,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
" ]}";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertEquals(5, searchSourceBuilder.sorts().size());
assertEquals(new FieldSortBuilder("post_date"), searchSourceBuilder.sorts().get(0));
assertEquals(new FieldSortBuilder("user"), searchSourceBuilder.sorts().get(1));
@@ -545,34 +208,34 @@ public class SearchSourceBuilderTests extends ESTestCase {
public void testAggsParsing() throws IOException {
{
- String restContent = "{\n" + " " +
- "\"aggs\": {" +
- " \"test_agg\": {\n" +
- " " + "\"terms\" : {\n" +
- " \"field\": \"foo\"\n" +
- " }\n" +
- " }\n" +
- " }\n" +
+ String restContent = "{\n" + " " +
+ "\"aggs\": {" +
+ " \"test_agg\": {\n" +
+ " " + "\"terms\" : {\n" +
+ " \"field\": \"foo\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
"}\n";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertEquals(1, searchSourceBuilder.aggregations().count());
}
}
{
- String restContent = "{\n" +
- " \"aggregations\": {" +
- " \"test_agg\": {\n" +
- " \"terms\" : {\n" +
- " \"field\": \"foo\"\n" +
- " }\n" +
- " }\n" +
- " }\n" +
+ String restContent = "{\n" +
+ " \"aggregations\": {" +
+ " \"test_agg\": {\n" +
+ " \"terms\" : {\n" +
+ " \"field\": \"foo\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
"}\n";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertEquals(1, searchSourceBuilder.aggregations().count());
}
}
@@ -598,7 +261,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
"}\n";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertEquals(1, searchSourceBuilder.rescores().size());
assertEquals(new QueryRescorerBuilder(QueryBuilders.matchQuery("content", "baz")).windowSize(50),
searchSourceBuilder.rescores().get(0));
@@ -621,7 +284,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
"}\n";
try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertEquals(1, searchSourceBuilder.rescores().size());
assertEquals(new QueryRescorerBuilder(QueryBuilders.matchQuery("content", "baz")).windowSize(50),
searchSourceBuilder.rescores().get(0));
@@ -634,7 +297,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
final String query = "{ \"query\": { \"match_all\": {}}, \"timeout\": \"" + timeout + "\"}";
try (XContentParser parser = XContentFactory.xContent(query).createParser(query)) {
final SearchSourceBuilder builder = SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
assertThat(builder.timeout(), equalTo(TimeValue.parseTimeValue(timeout, null, "timeout")));
}
}
@@ -647,7 +310,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
expectThrows(
ElasticsearchParseException.class,
() -> SearchSourceBuilder.fromXContent(createParseContext(parser),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters));
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers));
assertThat(e, hasToString(containsString("unit is missing or unrecognized")));
}
}
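Two things change throughout this file: the test now extends AbstractSearchTestCase, which supplies createSearchSourceBuilder(), searchRequestParsers, and namedWriteableRegistry, and every fromXContent call site gains a fourth searchExtParsers argument. A minimal sketch of the updated parse path, assuming those fields as the diff defines them; the JSON body is illustrative:

    String restContent = "{ \"query\": { \"match_all\": {} } }";
    try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
        QueryParseContext context = new QueryParseContext(
                searchRequestParsers.queryParsers, parser, ParseFieldMatcher.STRICT);
        SearchSourceBuilder builder = SearchSourceBuilder.fromXContent(context,
                searchRequestParsers.aggParsers,
                searchRequestParsers.suggesters,
                searchRequestParsers.searchExtParsers); // new fourth argument
    }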
diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
index df9af970f9..02def44416 100644
--- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
+++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
@@ -26,15 +26,18 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
-import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.search.SearchExtBuilder;
+import org.elasticsearch.search.SearchExtParser;
import org.elasticsearch.search.SearchHitField;
-import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.internal.InternalSearchHitField;
import org.elasticsearch.search.internal.SearchContext;
@@ -44,29 +47,32 @@ import org.elasticsearch.test.ESIntegTestCase.Scope;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import static java.util.Collections.singletonList;
-import static java.util.Collections.singletonMap;
import static org.elasticsearch.client.Requests.indexRequest;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.CoreMatchers.equalTo;
-/**
- *
- */
-@ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1)
+@ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2)
public class FetchSubPhasePluginIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
- return Arrays.asList(FetchTermVectorsPlugin.class);
+ return Collections.singletonList(FetchTermVectorsPlugin.class);
+ }
+
+ @Override
+ protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+ return nodePlugins();
}
+ @SuppressWarnings("unchecked")
public void testPlugin() throws Exception {
client().admin()
.indices()
@@ -88,12 +94,11 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
client().admin().indices().prepareRefresh().execute().actionGet();
- XContentBuilder extSource = jsonBuilder().startObject()
- .field("term_vectors_fetch", "test")
- .endObject();
- SearchResponse response = client().prepareSearch().setSource(new SearchSourceBuilder().ext(extSource)).get();
+ SearchResponse response = client().prepareSearch().setSource(new SearchSourceBuilder()
+ .ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))).get();
assertSearchResponse(response);
- assertThat(((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"), equalTo(2));
+ assertThat(((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"),
+ equalTo(2));
assertThat(((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("am"),
equalTo(2));
assertThat(((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("sam"),
@@ -105,46 +110,35 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
public List<FetchSubPhase> getFetchSubPhases(FetchPhaseConstructionContext context) {
return singletonList(new TermVectorsFetchSubPhase());
}
- }
-
- public static final class TermVectorsFetchSubPhase implements FetchSubPhase {
-
- public static final ContextFactory<TermVectorsFetchContext> CONTEXT_FACTORY = new ContextFactory<TermVectorsFetchContext>() {
-
- @Override
- public String getName() {
- return NAMES[0];
- }
-
- @Override
- public TermVectorsFetchContext newContextInstance() {
- return new TermVectorsFetchContext();
- }
- };
-
- public static final String[] NAMES = {"term_vectors_fetch"};
@Override
- public Map<String, ? extends SearchParseElement> parseElements() {
- return singletonMap("term_vectors_fetch", new TermVectorsFetchParseElement());
+ public List<SearchExtSpec<?>> getSearchExts() {
+ return Collections.singletonList(new SearchExtSpec<>(TermVectorsFetchSubPhase.NAME,
+ TermVectorsFetchBuilder::new, TermVectorsFetchParser.INSTANCE));
}
+ }
+
+ private static final class TermVectorsFetchSubPhase implements FetchSubPhase {
+ private static final String NAME = "term_vectors_fetch";
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
- if (context.getFetchSubPhaseContext(CONTEXT_FACTORY).hitExecutionNeeded() == false) {
+ TermVectorsFetchBuilder fetchSubPhaseBuilder = (TermVectorsFetchBuilder)context.getSearchExt(NAME);
+ if (fetchSubPhaseBuilder == null) {
return;
}
- String field = context.getFetchSubPhaseContext(CONTEXT_FACTORY).getField();
-
+ String field = fetchSubPhaseBuilder.getField();
if (hitContext.hit().fieldsOrNull() == null) {
hitContext.hit().fields(new HashMap<>());
}
- SearchHitField hitField = hitContext.hit().fields().get(NAMES[0]);
+ SearchHitField hitField = hitContext.hit().fields().get(NAME);
if (hitField == null) {
- hitField = new InternalSearchHitField(NAMES[0], new ArrayList<>(1));
- hitContext.hit().fields().put(NAMES[0], hitField);
+ hitField = new InternalSearchHitField(NAME, new ArrayList<>(1));
+ hitContext.hit().fields().put(NAME, hitField);
}
- TermVectorsResponse termVector = TermVectorsService.getTermVectors(context.indexShard(), new TermVectorsRequest(context.indexShard().shardId().getIndex().getName(), hitContext.hit().type(), hitContext.hit().id()));
+ TermVectorsRequest termVectorsRequest = new TermVectorsRequest(context.indexShard().shardId().getIndex().getName(),
+ hitContext.hit().type(), hitContext.hit().id());
+ TermVectorsResponse termVector = TermVectorsService.getTermVectors(context.indexShard(), termVectorsRequest);
try {
Map<String, Integer> tv = new HashMap<>();
TermsEnum terms = termVector.getFields().terms(field).iterator();
@@ -159,39 +153,74 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
}
}
- public static class TermVectorsFetchParseElement extends FetchSubPhaseParseElement<TermVectorsFetchContext> {
+ private static final class TermVectorsFetchParser implements SearchExtParser<TermVectorsFetchBuilder> {
+
+ private static final TermVectorsFetchParser INSTANCE = new TermVectorsFetchParser();
+
+ private TermVectorsFetchParser() {
+ }
@Override
- protected void innerParse(XContentParser parser, TermVectorsFetchContext termVectorsFetchContext, SearchContext searchContext)
- throws Exception {
+ public TermVectorsFetchBuilder fromXContent(XContentParser parser) throws IOException {
+ String field;
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.VALUE_STRING) {
- String fieldName = parser.text();
- termVectorsFetchContext.setField(fieldName);
+ field = parser.text();
} else {
- throw new IllegalStateException("Expected a VALUE_STRING but got " + token);
+ throw new ParsingException(parser.getTokenLocation(), "Expected a VALUE_STRING but got " + token);
+ }
+ if (field == null) {
+ throw new ParsingException(parser.getTokenLocation(), "no fields specified for " + TermVectorsFetchSubPhase.NAME);
}
+ return new TermVectorsFetchBuilder(field);
}
+ }
- @Override
- protected FetchSubPhase.ContextFactory getContextFactory() {
- return TermVectorsFetchSubPhase.CONTEXT_FACTORY;
+ private static final class TermVectorsFetchBuilder extends SearchExtBuilder {
+ private final String field;
+
+ private TermVectorsFetchBuilder(String field) {
+ this.field = field;
}
- }
- public static class TermVectorsFetchContext extends FetchSubPhaseContext {
+ private TermVectorsFetchBuilder(StreamInput in) throws IOException {
+ this.field = in.readString();
+ }
- private String field = null;
+ private String getField() {
+ return field;
+ }
- public TermVectorsFetchContext() {
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ TermVectorsFetchBuilder that = (TermVectorsFetchBuilder) o;
+ return Objects.equals(field, that.field);
}
- public void setField(String field) {
- this.field = field;
+ @Override
+ public int hashCode() {
+ return Objects.hash(field);
}
- public String getField() {
- return field;
+ @Override
+ public String getWriteableName() {
+ return TermVectorsFetchSubPhase.NAME;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(field);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.field(TermVectorsFetchSubPhase.NAME, field);
}
}
}
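This file replaces the old parseElements()/ContextFactory plumbing with the SearchExtSpec registration point, and the request now carries a writeable SearchExtBuilder instead of a raw XContentBuilder. A condensed sketch of both halves, using the names from the diff (the plugin class name below is hypothetical, and TermVectorsFetchParser.INSTANCE is assumed visible to the registering class, as it is in the test):

    // Registration side: a SearchPlugin exposes its ext section via getSearchExts().
    public class TermVectorsExtPlugin extends Plugin implements SearchPlugin {
        @Override
        public List<SearchExtSpec<?>> getSearchExts() {
            return Collections.singletonList(new SearchExtSpec<>("term_vectors_fetch",
                    TermVectorsFetchBuilder::new, TermVectorsFetchParser.INSTANCE));
        }
    }

    // Request side: the ext section is a list of SearchExtBuilder instances.
    SearchResponse response = client().prepareSearch()
            .setSource(new SearchSourceBuilder()
                    .ext(Collections.singletonList(new TermVectorsFetchBuilder("test"))))
            .get();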
diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java
new file mode 100644
index 0000000000..d20fb4e0c0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.subphase;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.TestSearchContext;
+
+import java.io.IOException;
+import java.util.Collections;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class FetchSourceSubPhaseTests extends ESTestCase {
+
+ public void testFetchSource() throws IOException {
+ XContentBuilder source = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject();
+ FetchSubPhase.HitContext hitContext = hitExecute(source, true, null, null);
+ assertEquals(Collections.singletonMap("field","value"), hitContext.hit().sourceAsMap());
+ }
+
+ public void testBasicFiltering() throws IOException {
+ XContentBuilder source = XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value")
+ .field("field2", "value2")
+ .endObject();
+ FetchSubPhase.HitContext hitContext = hitExecute(source, false, null, null);
+ assertNull(hitContext.hit().sourceAsMap());
+
+ hitContext = hitExecute(source, true, "field1", null);
+ assertEquals(Collections.singletonMap("field1","value"), hitContext.hit().sourceAsMap());
+
+ hitContext = hitExecute(source, true, "hello", null);
+ assertEquals(Collections.emptyMap(), hitContext.hit().sourceAsMap());
+
+ hitContext = hitExecute(source, true, "*", "field2");
+ assertEquals(Collections.singletonMap("field1","value"), hitContext.hit().sourceAsMap());
+ }
+
+ public void testMultipleFiltering() throws IOException {
+ XContentBuilder source = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .field("field2", "value2")
+ .endObject();
+ FetchSubPhase.HitContext hitContext = hitExecuteMultiple(source, true, new String[]{"*.notexisting", "field"}, null);
+ assertEquals(Collections.singletonMap("field","value"), hitContext.hit().sourceAsMap());
+
+ hitContext = hitExecuteMultiple(source, true, new String[]{"field.notexisting.*", "field"}, null);
+ assertEquals(Collections.singletonMap("field","value"), hitContext.hit().sourceAsMap());
+ }
+
+ public void testSourceDisabled() throws IOException {
+ FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null);
+ assertNull(hitContext.hit().sourceAsMap());
+
+ hitContext = hitExecute(null, false, null, null);
+ assertNull(hitContext.hit().sourceAsMap());
+
+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> hitExecute(null, true, "field1", null));
+ assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " +
+ "for index [index]", exception.getMessage());
+
+ exception = expectThrows(IllegalArgumentException.class,
+ () -> hitExecuteMultiple(null, true, new String[]{"*"}, new String[]{"field2"}));
+ assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " +
+ "for index [index]", exception.getMessage());
+ }
+
+ private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) {
+ return hitExecuteMultiple(source, fetchSource,
+ include == null ? Strings.EMPTY_ARRAY : new String[]{include},
+ exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude});
+ }
+
+ private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes) {
+ FetchSourceContext fetchSourceContext = new FetchSourceContext(fetchSource, includes, excludes);
+ SearchContext searchContext = new FetchSourceSubPhaseTestSearchContext(fetchSourceContext, source == null ? null : source.bytes());
+ FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
+ hitContext.reset(new InternalSearchHit(1, null, null, null), null, 1, null);
+ FetchSourceSubPhase phase = new FetchSourceSubPhase();
+ phase.hitExecute(searchContext, hitContext);
+ return hitContext;
+ }
+
+ private static class FetchSourceSubPhaseTestSearchContext extends TestSearchContext {
+ final FetchSourceContext context;
+ final BytesReference source;
+ final IndexShard indexShard;
+
+ FetchSourceSubPhaseTestSearchContext(FetchSourceContext context, BytesReference source) {
+ super(null);
+ this.context = context;
+ this.source = source;
+ this.indexShard = mock(IndexShard.class);
+ when(indexShard.shardId()).thenReturn(new ShardId("index", "index", 1));
+ }
+
+ @Override
+ public boolean sourceRequested() {
+ return context != null && context.fetchSource();
+ }
+
+ @Override
+ public FetchSourceContext fetchSourceContext() {
+ return context;
+ }
+
+ @Override
+ public SearchLookup lookup() {
+ SearchLookup lookup = super.lookup();
+ lookup.source().setSource(source);
+ return lookup;
+ }
+
+ @Override
+ public IndexShard indexShard() {
+ return indexShard;
+ }
+ }
+}
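The new harness above drives FetchSourceSubPhase directly against a mocked IndexShard, and the FetchSourceContext shape it exercises is the three-argument form. A one-line sketch, values illustrative:

    // fetchSource flag plus include/exclude wildcard patterns.
    FetchSourceContext ctx = new FetchSourceContext(true,
            new String[]{"field1", "obj.*"},   // includes
            new String[]{"*.hidden"});         // excludes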
diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index 33e8cb3784..19904be38b 100644
--- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.fetch.subphase.highlight;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequestBuilder;
@@ -27,6 +26,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Settings.Builder;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -38,6 +38,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
@@ -50,8 +51,8 @@ import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import java.io.IOException;
-import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -96,7 +97,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
- return Arrays.asList(InternalSettingsPlugin.class);
+ return Collections.singletonList(InternalSettingsPlugin.class);
}
public void testHighlightingWithWildcardName() throws IOException {
@@ -1537,7 +1538,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
.addMapping("type1", "tags", "type=text"));
ensureGreen();
client().prepareIndex("test", "type1", "1")
- .setSource(jsonBuilder().startObject().field("tags",
+ .setSource(jsonBuilder().startObject().array("tags",
"this is a really long tag i would like to highlight",
"here is another one that is very long tag and has the tag token near the end").endObject()).get();
refresh();
@@ -2727,7 +2728,6 @@ public class HighlighterSearchIT extends ESIntegTestCase {
.startObject("properties")
.startObject("geo_point")
.field("type", "geo_point")
- .field("geohash", true)
.endObject()
.startObject("text")
.field("type", "text")
@@ -2756,6 +2756,45 @@ public class HighlighterSearchIT extends ESIntegTestCase {
assertThat(search.getHits().getAt(0).highlightFields().get("text").fragments().length, equalTo(1));
}
+ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException {
+ // same as above but in this example the query gets rewritten during highlighting
+ // see https://github.com/elastic/elasticsearch/issues/17537#issuecomment-244939633
+ XContentBuilder mappings = jsonBuilder();
+ mappings.startObject();
+ mappings.startObject("jobs")
+ .startObject("_all")
+ .field("enabled", false)
+ .endObject()
+ .startObject("properties")
+ .startObject("loc")
+ .field("type", "geo_point")
+ .endObject()
+ .startObject("jd")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject();
+ mappings.endObject();
+ assertAcked(prepareCreate("test")
+ .addMapping("jobs", mappings));
+ ensureYellow();
+
+ client().prepareIndex("test", "jobs", "1")
+ .setSource(jsonBuilder().startObject().field("jd", "some आवश्यकता है- आर्य समाज अनाथालय, 68 सिविल लाइन्स, बरेली को एक पुरूष" +
+ " रस text")
+ .field("loc", "12.934059,77.610741").endObject())
+ .get();
+ refresh();
+
+ QueryBuilder query = QueryBuilders.functionScoreQuery(QueryBuilders.boolQuery().filter(QueryBuilders.geoBoundingBoxQuery("loc")
+ .setCorners(new GeoPoint(48.934059, 41.610741), new GeoPoint(-23.065941, 113.610741))));
+ SearchResponse search = client().prepareSearch().setSource(
+ new SearchSourceBuilder().query(query).highlighter(new HighlightBuilder().highlighterType("plain").field("jd"))).get();
+ assertNoFailures(search);
+ assertThat(search.getHits().totalHits(), equalTo(1L));
+ }
+
+
public void testKeywordFieldHighlighting() throws IOException {
// check that keyword highlighting works
XContentBuilder mappings = jsonBuilder();
@@ -2851,4 +2890,21 @@ public class HighlighterSearchIT extends ESIntegTestCase {
assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>"));
assertThat(field.getFragments()[1].string(), equalTo("<em>cow</em>"));
}
+
+ public void testFunctionScoreQueryHighlight() throws Exception {
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("text", "brown").endObject())
+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro")))
+ .highlighter(new HighlightBuilder()
+ .field(new Field("text")))
+ .get();
+ assertHitCount(searchResponse, 1);
+ HighlightField field = searchResponse.getHits().getAt(0).highlightFields().get("text");
+ assertThat(field.getFragments().length, equalTo(1));
+ assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>"));
+ }
}
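In the tags hunk above, multi-valued fields are now written with array(name, values...) rather than field(name, values...), making the multi-value intent explicit. A short sketch of the builder call, values illustrative:

    XContentBuilder doc = jsonBuilder().startObject()
            .array("tags", "first long tag", "second long tag")
            .endObject();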
diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighterTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighterTests.java
index 428751e885..b923c2464d 100644
--- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighterTests.java
+++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighterTests.java
@@ -68,8 +68,11 @@ public class PlainHighlighterTests extends LuceneTestCase {
String fragment = highlighter.getBestFragment(fieldNameAnalyzer.tokenStream("text", "Arbitrary text field which should not cause " +
"a failure"), "Arbitrary text field which should not cause a failure");
assertThat(fragment, equalTo("Arbitrary text field which should not cause a <B>failure</B>"));
- // TODO: This test will fail if we pass in an instance of GeoPointInBBoxQueryImpl too. Should we also find a way to work around that
- // or can the query not be rewritten before it is passed into the highlighter?
+ Query rewritten = boolQuery.rewrite(null);
+ highlighter = new org.apache.lucene.search.highlight.Highlighter(new CustomQueryScorer(rewritten));
+ fragment = highlighter.getBestFragment(fieldNameAnalyzer.tokenStream("text", "Arbitrary text field which should not cause " +
+ "a failure"), "Arbitrary text field which should not cause a failure");
+ assertThat(fragment, equalTo("Arbitrary text field which should not cause a <B>failure</B>"));
}
public void testGeoPointInBBoxQueryHighlighting() throws IOException, InvalidTokenOffsetsException {
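The TODO in this hunk is resolved by rewriting the query before handing it to the highlighter. A minimal sketch of the pattern, assuming (as in the test) a query whose rewrite does not need to touch an IndexReader:

    Query rewritten = query.rewrite(null); // safe here: rewrite needs no reader access
    org.apache.lucene.search.highlight.Highlighter highlighter =
            new org.apache.lucene.search.highlight.Highlighter(new CustomQueryScorer(rewritten));
    String fragment = highlighter.getBestFragment(
            analyzer.tokenStream("text", content), content);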
diff --git a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java
index b091149fa0..e66eeb4876 100644
--- a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java
@@ -80,7 +80,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
- return Arrays.asList(CustomScriptPlugin.class);
+ return Collections.singletonList(CustomScriptPlugin.class);
}
public static class CustomScriptPlugin extends MockScriptPlugin {
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
index 14f3e8e0bb..cc832f8a7d 100644
--- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
@@ -19,12 +19,9 @@
package org.elasticsearch.search.geo;
-import org.elasticsearch.common.logging.ESLoggerFactory;
-import org.locationtech.spatial4j.context.SpatialContext;
-import org.locationtech.spatial4j.distance.DistanceUtils;
-import org.locationtech.spatial4j.exception.InvalidShapeException;
-import org.locationtech.spatial4j.shape.Shape;
-
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
+import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.query.SpatialArgs;
@@ -48,9 +45,11 @@ import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
import org.elasticsearch.common.geo.builders.PolygonBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.query.GeohashCellQuery;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
@@ -59,6 +58,10 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import org.junit.BeforeClass;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.distance.DistanceUtils;
+import org.locationtech.spatial4j.exception.InvalidShapeException;
+import org.locationtech.spatial4j.shape.Shape;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
@@ -432,8 +435,10 @@ public class GeoFilterIT extends ESIntegTestCase {
String name = hit.getId();
if (version.before(Version.V_2_2_0)) {
point.resetFromString(hit.fields().get("pin").getValue().toString());
- } else {
+ } else if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
point.resetFromIndexHash(hit.fields().get("pin").getValue());
+ } else {
+ point.resetFromString(hit.getFields().get("pin").getValue());
}
double dist = distance(point.getLat(), point.getLon(), 51.11, 9.851);
@@ -445,7 +450,7 @@ public class GeoFilterIT extends ESIntegTestCase {
}
}
- public void testGeohashCellFilter() throws IOException {
+ public void testLegacyGeohashCellFilter() throws IOException {
String geohash = randomhash(10);
logger.info("Testing geohash_cell filter for [{}]", geohash);
@@ -456,8 +461,11 @@ public class GeoFilterIT extends ESIntegTestCase {
logger.info("Parent Neighbors {}", parentNeighbors);
ensureYellow();
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
- client().admin().indices().prepareCreate("locations").addMapping("location", "pin", "type=geo_point,geohash_prefix=true,lat_lon=false").execute().actionGet();
+ client().admin().indices().prepareCreate("locations").setSettings(settings).addMapping("location", "pin",
+ "type=geo_point,geohash_prefix=true,lat_lon=false").execute().actionGet();
// Index a pin
client().prepareIndex("locations", "location", "1").setCreate(true).setSource("pin", geohash).execute().actionGet();
@@ -562,7 +570,10 @@ public class GeoFilterIT extends ESIntegTestCase {
strategy.makeQuery(args);
return true;
} catch (UnsupportedSpatialOperation e) {
- ESLoggerFactory.getLogger(GeoFilterIT.class.getName()).info("Unsupported spatial operation {}", e, relation);
+ final SpatialOperation finalRelation = relation;
+ ESLoggerFactory
+ .getLogger(GeoFilterIT.class.getName())
+ .info((Supplier<?>) () -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e);
return false;
}
}
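The logging hunk above shows the log4j2 idiom this migration adopts: the captured variable must be effectively final for the lambda, and the exception moves to a trailing argument instead of being interpolated. Sketch, names from the diff:

    // Lazy message construction: the Supplier runs only if INFO is enabled.
    final SpatialOperation op = relation; // lambda capture must be effectively final
    ESLoggerFactory.getLogger(GeoFilterIT.class.getName())
            .info((Supplier<?>) () -> new ParameterizedMessage(
                    "Unsupported spatial operation {}", op), e);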
diff --git a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java
index dedd47d3e4..20216e1059 100644
--- a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java
+++ b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java
@@ -32,6 +32,7 @@ import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
public class InternalSearchHitTests extends ESTestCase {
@@ -63,19 +64,15 @@ public class InternalSearchHitTests extends ESTestCase {
InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[]{hit1, hit2}, 2, 1f);
- InternalSearchHits.StreamContext context = new InternalSearchHits.StreamContext();
- context.streamShardTarget(InternalSearchHits.StreamContext.ShardTargetType.STREAM);
BytesStreamOutput output = new BytesStreamOutput();
- hits.writeTo(output, context);
+ hits.writeTo(output);
InputStream input = output.bytes().streamInput();
- context = new InternalSearchHits.StreamContext();
- context.streamShardTarget(InternalSearchHits.StreamContext.ShardTargetType.STREAM);
- InternalSearchHits results = InternalSearchHits.readSearchHits(new InputStreamStreamInput(input), context);
+ InternalSearchHits results = InternalSearchHits.readSearchHits(new InputStreamStreamInput(input));
assertThat(results.getAt(0).shard(), equalTo(target));
- assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).shard(), nullValue());
- assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).shard(), nullValue());
- assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).shard(), nullValue());
- assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).shard(), nullValue());
+ assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).shard(), notNullValue());
+ assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).shard(), notNullValue());
+ assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).shard(), notNullValue());
+ assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).shard(), notNullValue());
assertThat(results.getAt(1).shard(), equalTo(target));
}
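With StreamContext removed, hit serialization round-trips through the plain writeTo/read pair. Sketch, assuming hits and streams typed as in the test above:

    BytesStreamOutput output = new BytesStreamOutput();
    hits.writeTo(output); // no StreamContext argument anymore
    InternalSearchHits roundTripped = InternalSearchHits.readSearchHits(
            new InputStreamStreamInput(output.bytes().streamInput()));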
diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java
index a49fdef559..452b6b6ba3 100644
--- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java
@@ -20,60 +20,20 @@
package org.elasticsearch.search.internal;
import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.indices.IndicesModule;
-import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.search.SearchRequestTests;
-import org.elasticsearch.snapshots.Snapshot;
-import org.elasticsearch.snapshots.SnapshotId;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.VersionUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.elasticsearch.search.AbstractSearchTestCase;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import static java.util.Collections.emptyList;
-
-public class ShardSearchTransportRequestTests extends ESTestCase {
-
- private static NamedWriteableRegistry namedWriteableRegistry;
-
- @BeforeClass
- public static void beforeClass() {
- IndicesModule indicesModule = new IndicesModule(emptyList()) {
- @Override
- protected void configure() {
- bindMapperExtension();
- }
- };
- SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()) {
- @Override
- protected void configureSearch() {
- // Skip me
- }
- };
- List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
- entries.addAll(indicesModule.getNamedWriteables());
- entries.addAll(searchModule.getNamedWriteables());
- namedWriteableRegistry = new NamedWriteableRegistry(entries);
- }
-
- @AfterClass
- public static void afterClass() {
- namedWriteableRegistry = null;
- }
+public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
public void testSerialization() throws Exception {
ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest();
@@ -93,19 +53,17 @@ public class ShardSearchTransportRequestTests extends ESTestCase {
assertEquals(deserializedRequest.searchType(), shardSearchTransportRequest.searchType());
assertEquals(deserializedRequest.shardId(), shardSearchTransportRequest.shardId());
assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards());
+ assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey());
assertNotSame(deserializedRequest, shardSearchTransportRequest);
}
}
}
- private static ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException {
- SearchRequest searchRequest = SearchRequestTests.createSearchRequest();
+ private ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException {
+ SearchRequest searchRequest = createSearchRequest();
ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt());
- Snapshot snapshot = new Snapshot(randomAsciiOfLengthBetween(3, 10),
- new SnapshotId(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10)));
- RestoreSource restoreSource = new RestoreSource(snapshot, VersionUtils.randomVersion(random()), randomAsciiOfLengthBetween(3, 10));
- ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, restoreSource, randomBoolean(),
- new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason"));
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED,
+ new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason"));
String[] filteringAliases;
if (randomBoolean()) {
filteringAliases = generateRandomStringArray(10, 10, false, false);
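The round trip that testSerialization() performs is the standard pattern for transport-layer serialization tests: write the request to a BytesStreamOutput, wrap the resulting bytes in a NamedWriteableAwareStreamInput backed by the registry that AbstractSearchTestCase now provides, and read a fresh copy back. A minimal sketch of that pattern, assuming the test-provided `request` and `namedWriteableRegistry` fixtures and the 5.0-era stream API:

    // Hedged sketch of the serialization round trip exercised above.
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        request.writeTo(output);                       // serialize the original
        try (StreamInput in = new NamedWriteableAwareStreamInput(
                output.bytes().streamInput(), namedWriteableRegistry)) {
            ShardSearchTransportRequest copy = new ShardSearchTransportRequest();
            copy.readFrom(in);                         // materialize the copy
            // searchType(), shardId(), numberOfShards() and cacheKey()
            // are then compared field by field, as in the hunk above.
        }
    }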
diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
index 0b89e56dd1..21f1586075 100644
--- a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
+++ b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
@@ -38,7 +38,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
-import static org.elasticsearch.client.Requests.indexAliasesRequest;
import static org.elasticsearch.client.Requests.indexRequest;
import static org.elasticsearch.client.Requests.refreshRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
@@ -108,8 +107,9 @@ public class MoreLikeThisIT extends ESIntegTestCase {
.startObject("text").field("type", "text").endObject()
.endObject().endObject().endObject()));
logger.info("Creating aliases alias release");
- client().admin().indices().aliases(indexAliasesRequest().addAlias("release", termQuery("text", "release"), "test")).actionGet();
- client().admin().indices().aliases(indexAliasesRequest().addAlias("beta", termQuery("text", "beta"), "test")).actionGet();
+ client().admin().indices().prepareAliases()
+ .addAlias("test", "release", termQuery("text", "release"))
+ .addAlias("test", "beta", termQuery("text", "beta")).get();
logger.info("Running Cluster Health");
assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
@@ -155,8 +155,8 @@ public class MoreLikeThisIT extends ESIntegTestCase {
.startObject("properties")
.endObject()
.endObject().endObject().string();
- client().admin().indices().prepareCreate(indexName).addMapping(typeName, mapping).execute().actionGet();
- client().admin().indices().aliases(indexAliasesRequest().addAlias(aliasName, indexName)).actionGet();
+ client().admin().indices().prepareCreate(indexName).addMapping(typeName, mapping).get();
+ client().admin().indices().prepareAliases().addAlias(indexName, aliasName).get();
assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
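Note that the two alias APIs take their arguments in different orders: the request-style addAlias(alias, filter, index) puts the alias first, while the builder-style prepareAliases().addAlias(index, alias, filter) puts the index first, which is easy to get backwards when migrating. A small sketch of the builder form, with a hypothetical unfiltered alias alongside the filtered one:

    // Builder-style alias creation: index first, then alias, then optional filter.
    client().admin().indices().prepareAliases()
            .addAlias("test", "release", termQuery("text", "release")) // filtered alias
            .addAlias("test", "all")               // plain alias (hypothetical name)
            .get();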
diff --git a/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
index f245629a28..342da16f50 100644
--- a/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
+++ b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
@@ -21,6 +21,8 @@ package org.elasticsearch.search.profile.aggregation;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedOrdinalsSamplerAggregator;
import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregator;
@@ -37,6 +39,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.diversifiedSampler;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
@@ -187,6 +190,129 @@ public class AggregationProfilerIT extends ESIntegTestCase {
}
}
+ public void testMultiLevelProfileBreadthFirst() {
+ SearchResponse response = client().prepareSearch("idx").setProfile(true)
+ .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L).subAggregation(terms("terms")
+ .collectMode(SubAggCollectionMode.BREADTH_FIRST).field(TAG_FIELD).subAggregation(avg("avg").field(NUMBER_FIELD))))
+ .get();
+ assertSearchResponse(response);
+ Map<String, ProfileShardResult> profileResults = response.getProfileResults();
+ assertThat(profileResults, notNullValue());
+ assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+ for (ProfileShardResult profileShardResult : profileResults.values()) {
+ assertThat(profileShardResult, notNullValue());
+ AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+ assertThat(aggProfileResults, notNullValue());
+ List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+ assertThat(aggProfileResultsList, notNullValue());
+ assertThat(aggProfileResultsList.size(), equalTo(1));
+ ProfileResult histoAggResult = aggProfileResultsList.get(0);
+ assertThat(histoAggResult, notNullValue());
+ assertThat(histoAggResult.getQueryName(),
+ equalTo("org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator"));
+ assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+ assertThat(histoAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
+ assertThat(histoBreakdown, notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
+
+ ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0);
+ assertThat(termsAggResult, notNullValue());
+ assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName()));
+ assertThat(termsAggResult.getLuceneDescription(), equalTo("terms"));
+ assertThat(termsAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown();
+ assertThat(termsBreakdown, notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1));
+
+ ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
+ assertThat(avgAggResult, notNullValue());
+ assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName()));
+ assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
+ assertThat(avgAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
+ assertThat(avgBreakdown, notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+ }
+ }
+
+ public void testDiversifiedAggProfile() {
+ SearchResponse response = client().prepareSearch("idx").setProfile(true)
+ .addAggregation(diversifiedSampler("diversify").shardSize(10).field(STRING_FIELD).maxDocsPerValue(2)
+ .subAggregation(max("max").field(NUMBER_FIELD)))
+ .get();
+ assertSearchResponse(response);
+ Map<String, ProfileShardResult> profileResults = response.getProfileResults();
+ assertThat(profileResults, notNullValue());
+ assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+ for (ProfileShardResult profileShardResult : profileResults.values()) {
+ assertThat(profileShardResult, notNullValue());
+ AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+ assertThat(aggProfileResults, notNullValue());
+ List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+ assertThat(aggProfileResultsList, notNullValue());
+ assertThat(aggProfileResultsList.size(), equalTo(1));
+ ProfileResult diversifyAggResult = aggProfileResultsList.get(0);
+ assertThat(diversifyAggResult, notNullValue());
+ assertThat(diversifyAggResult.getQueryName(),
+ equalTo(DiversifiedOrdinalsSamplerAggregator.class.getName()));
+ assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify"));
+ assertThat(diversifyAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> diversifyBreakdown = diversifyAggResult.getTimeBreakdown();
+ assertThat(diversifyBreakdown, notNullValue());
+ assertThat(diversifyBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(diversifyBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(diversifyBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(diversifyBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(diversifyBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(diversifyBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(diversifyBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(diversifyBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(diversifyAggResult.getProfiledChildren().size(), equalTo(1));
+
+ ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0);
+ assertThat(maxAggResult, notNullValue());
+ assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName()));
+ assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
+ assertThat(maxAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown();
+ assertThat(maxBreakdown, notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+ }
+ }
+
public void testComplexProfile() {
SearchResponse response = client().prepareSearch("idx").setProfile(true)
.addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)
diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java
index cb55f88ff8..693fffa307 100644
--- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java
@@ -260,8 +260,8 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
.addSort("_uid", SortOrder.ASC)
.setQuery(multiMatchQueryBuilder).get();
MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString());
- if (getType(multiMatchQueryBuilder) != null) {
- matchQueryBuilder.type(MatchQuery.Type.valueOf(getType(multiMatchQueryBuilder).matchQueryType().toString()));
+ if (multiMatchQueryBuilder.getType() != null) {
+ matchQueryBuilder.type(MatchQuery.Type.valueOf(multiMatchQueryBuilder.getType().matchQueryType().toString()));
}
SearchResponse matchResp = client().prepareSearch("test")
// _uid tie sort
@@ -569,9 +569,10 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
// test if boosts work
searchResponse = client().prepareSearch("test")
- .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").field("last_name", 10)
+ .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10)
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
.operator(Operator.AND))).get();
+ assertHitCount(searchResponse, 2L);
assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted
assertSecondHit(searchResponse, hasId("ultimate2"));
assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
@@ -582,6 +583,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
.setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
.operator(Operator.AND))).get();
+ assertHitCount(searchResponse, 2L);
assertFirstHit(searchResponse, hasId("ultimate2"));
assertSecondHit(searchResponse, hasId("ultimate1"));
assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
@@ -590,28 +592,33 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
searchResponse = client().prepareSearch("test")
.setQuery(randomizeType(multiMatchQuery("15", "skill")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
+ assertHitCount(searchResponse, 1L);
assertFirstHit(searchResponse, hasId("theone"));
searchResponse = client().prepareSearch("test")
.setQuery(randomizeType(multiMatchQuery("15", "skill", "first_name")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
+ assertHitCount(searchResponse, 1L);
assertFirstHit(searchResponse, hasId("theone"));
// Two numeric fields together caused trouble at one point!
searchResponse = client().prepareSearch("test")
.setQuery(randomizeType(multiMatchQuery("15", "int-field", "skill")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
+ assertHitCount(searchResponse, 1L);
assertFirstHit(searchResponse, hasId("theone"));
searchResponse = client().prepareSearch("test")
.setQuery(randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
+ assertHitCount(searchResponse, 1L);
assertFirstHit(searchResponse, hasId("theone"));
searchResponse = client().prepareSearch("test")
.setQuery(randomizeType(multiMatchQuery("alpha 15", "first_name", "skill")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
.lenient(true))).get();
+ assertHitCount(searchResponse, 1L);
assertFirstHit(searchResponse, hasId("ultimate1"));
/*
* Doesn't find theone because "alpha 15" isn't a number and we don't
@@ -624,6 +631,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
.setQuery(randomizeType(multiMatchQuery("alpha 15", "int-field", "first_name", "skill")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
.lenient(true))).get();
+ assertHitCount(searchResponse, 1L);
assertFirstHit(searchResponse, hasId("ultimate1"));
}
@@ -647,21 +655,21 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
}
}
- public static List<String> fill(List<String> list, String value, int times) {
+ private static List<String> fill(List<String> list, String value, int times) {
for (int i = 0; i < times; i++) {
list.add(value);
}
return list;
}
- public List<String> fillRandom(List<String> list, int times) {
+ private static List<String> fillRandom(List<String> list, int times) {
for (int i = 0; i < times; i++) {
- list.add(randomAsciiOfLengthBetween(1, 5));
+ list.add(randomAsciiOfLength(5));
}
return list;
}
- public <T> T randomPickExcept(List<T> fromList, T butNot) {
+ private static <T> T randomPickExcept(List<T> fromList, T butNot) {
while (true) {
T t = RandomPicks.randomFrom(random(), fromList);
if (t.equals(butNot)) {
@@ -671,9 +679,9 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
}
}
- public MultiMatchQueryBuilder randomizeType(MultiMatchQueryBuilder builder) {
+ private static MultiMatchQueryBuilder randomizeType(MultiMatchQueryBuilder builder) {
try {
- MultiMatchQueryBuilder.Type type = getType(builder);
+ MultiMatchQueryBuilder.Type type = builder.getType();
if (type == null && randomBoolean()) {
return builder;
}
@@ -715,8 +723,4 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
throw new RuntimeException(ex);
}
}
-
- private MultiMatchQueryBuilder.Type getType(MultiMatchQueryBuilder builder) throws NoSuchFieldException, IllegalAccessException {
- return builder.getType();
- }
}
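The added assertHitCount() calls guard each ranking assertion: assertFirstHit() on an empty hit set would otherwise fail with a far less informative error. The cross_fields pattern being exercised, with a per-field boost, looks like this in isolation:

    // cross_fields multi_match with a boosted field, as exercised above.
    SearchResponse resp = client().prepareSearch("test")
            .setQuery(multiMatchQuery("the ultimate", "full_name", "first_name", "category")
                    .field("last_name", 10)                       // boost last_name
                    .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                    .operator(Operator.AND))
            .get();
    assertHitCount(resp, 2L);                 // check the count before the ranking
    assertFirstHit(resp, hasId("ultimate1")); // the boosted field wins the tie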
diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
index 3ea9eac5cc..84f83c46e1 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
@@ -21,6 +21,8 @@ package org.elasticsearch.search.sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Accountable;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -218,7 +220,8 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST
protected QueryShardContext createMockShardContext() {
Index index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
- IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);
+ IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index,
+ Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null);
IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY),
cache, null, null);
@@ -241,8 +244,8 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST
@Override
public ObjectMapper getObjectMapper(String name) {
- BuilderContext context = new BuilderContext(Settings.EMPTY, new ContentPath());
- return new ObjectMapper.Builder<>(name).nested(Nested.newNested(false, false)).build(context);
+ BuilderContext context = new BuilderContext(this.getIndexSettings().getSettings(), new ContentPath());
+ return (ObjectMapper) new ObjectMapper.Builder<>(name).nested(Nested.newNested(false, false)).build(context);
}
};
}
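Mock shard contexts now have to declare index.version.created, because 5.0-era mappers (the new geo_point implementation among them) pick their behavior based on the version the index was created with. The settings construction in isolation, assuming the test framework's IndexSettingsModule:

    // Test index settings must carry the created version for version-aware mappers.
    Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .build();
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(
            new Index("test", "_na_"), settings);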
diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
index 78c77e15f3..84dd3dabf6 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
@@ -122,11 +122,11 @@ public class FieldSortIT extends ESIntegTestCase {
}
}
- @LuceneTestCase.BadApple(bugUrl = "simon is working on this")
public void testIssue6614() throws ExecutionException, InterruptedException {
List<IndexRequestBuilder> builders = new ArrayList<>();
boolean strictTimeBasedIndices = randomBoolean();
final int numIndices = randomIntBetween(2, 25); // at most 25 days in the month
+ int docs = 0;
for (int i = 0; i < numIndices; i++) {
final String indexId = strictTimeBasedIndices ? "idx_" + i : "idx";
if (strictTimeBasedIndices || i == 0) {
@@ -142,9 +142,10 @@ public class FieldSortIT extends ESIntegTestCase {
String.format(Locale.ROOT, "%02d", j+1) +
":00:00"));
}
+ indexRandom(true, builders);
+ docs += builders.size();
+ builders.clear();
}
- int docs = builders.size();
- indexRandom(true, builders);
SearchResponse allDocsResponse = client().prepareSearch().setQuery(
QueryBuilders.boolQuery().must(QueryBuilders.termQuery("foo", "bar")).must(
QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")))
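The re-enabled test now indexes and refreshes each batch inside the loop, accumulating the expected document count as it goes, instead of computing it from a single builders list after the loop. The pattern in isolation:

    // Index per-batch and accumulate the expected count.
    int docs = 0;
    List<IndexRequestBuilder> builders = new ArrayList<>();
    for (int i = 0; i < numIndices; i++) {
        // ... add this index's requests to builders ...
        indexRandom(true, builders); // index and refresh this batch
        docs += builders.size();
        builders.clear();            // start the next batch empty
    }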
diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java
index 95c20381cf..f39b3ff92f 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java
@@ -67,8 +67,8 @@ public class GeoDistanceIT extends ESIntegTestCase {
return Arrays.asList(InternalSettingsPlugin.class);
}
- public void testSimpleDistance() throws Exception {
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ public void testLegacyGeoDistanceRangeQuery() throws Exception {
+ Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_4_0);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("location").field("type", "geo_point");
@@ -230,7 +230,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
.actionGet();
client().prepareIndex("test", "type1", "3")
- .setSource(jsonBuilder().startObject().field("names", "Times Square", "Tribeca").startArray("locations")
+ .setSource(jsonBuilder().startObject().array("names", "Times Square", "Tribeca").startArray("locations")
// to NY: 5.286 km
.startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
// to NY: 0.4621 km
@@ -238,7 +238,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
.execute().actionGet();
client().prepareIndex("test", "type1", "4")
- .setSource(jsonBuilder().startObject().field("names", "Wall Street", "Soho").startArray("locations")
+ .setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").startArray("locations")
// to NY: 1.055 km
.startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
// to NY: 1.258 km
@@ -246,7 +246,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
.execute().actionGet();
client().prepareIndex("test", "type1", "5")
- .setSource(jsonBuilder().startObject().field("names", "Greenwich Village", "Brooklyn").startArray("locations")
+ .setSource(jsonBuilder().startObject().array("names", "Greenwich Village", "Brooklyn").startArray("locations")
// to NY: 2.029 km
.startObject().field("lat", 40.731033).field("lon", -73.9962255).endObject()
// to NY: 8.572 km
@@ -355,14 +355,14 @@ public class GeoDistanceIT extends ESIntegTestCase {
ensureGreen();
client().prepareIndex("test", "type1", "1")
- .setSource(jsonBuilder().startObject().field("names", "Times Square", "Tribeca").startArray("locations")
+ .setSource(jsonBuilder().startObject().array("names", "Times Square", "Tribeca").startArray("locations")
// to NY: 5.286 km
.startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
// to NY: 0.4621 km
.startObject().field("lat", 40.718266).field("lon", -74.007819).endObject().endArray().endObject())
.execute().actionGet();
- client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("names", "Wall Street", "Soho").endObject())
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject())
.execute().actionGet();
refresh();
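The .field() to .array() migration is about intent: XContentBuilder.field(name, values...) had a varargs overload that silently produced an array, while .array(name, values...) says so explicitly. Both emit the same JSON:

    // Explicit array syntax; equivalent JSON: {"names":["Wall Street","Soho"]}
    XContentBuilder doc = jsonBuilder().startObject()
            .array("names", "Wall Street", "Soho")
            .endObject();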
diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
index 2eef021be3..707f646282 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java
@@ -25,7 +25,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.GeoValidationMethod;
@@ -70,7 +69,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
* |___________________________
* 1 2 3 4 5 6 7
*/
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point"));
XContentBuilder d1Builder = jsonBuilder();
@@ -97,35 +96,35 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d1", "d2");
- assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 3, 2, DistanceUnit.KILOMETERS), 0.01d));
- assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 5, 1, DistanceUnit.KILOMETERS), 0.01d));
+ assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d));
+ assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d));
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d2", "d1");
- assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 5, 1, DistanceUnit.KILOMETERS), 0.01d));
- assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 3, 2, DistanceUnit.KILOMETERS), 0.01d));
+ assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d));
+ assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d));
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d1", "d2");
- assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 4, 1, DistanceUnit.KILOMETERS), 0.01d));
- assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 6, 2, DistanceUnit.KILOMETERS), 0.01d));
+ assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d));
+ assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d));
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d2", "d1");
- assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 6, 2, DistanceUnit.KILOMETERS), 0.01d));
- assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 4, 1, DistanceUnit.KILOMETERS), 0.01d));
+ assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d));
+ assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d));
}
public void testSingleToManyAvgMedian() throws ExecutionException, InterruptedException, IOException {
@@ -135,7 +134,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
* d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4
* d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5
*/
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point"));
XContentBuilder d1Builder = jsonBuilder();
@@ -155,19 +154,19 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d2", "d1");
- assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(0, 0, 0, 4, DistanceUnit.KILOMETERS), 0.01d));
- assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(0, 0, 0, 5, DistanceUnit.KILOMETERS), 0.01d));
+ assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d));
+ assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d));
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d1", "d2");
- assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(0, 0, 0, 4, DistanceUnit.KILOMETERS), 0.01d));
- assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(0, 0, 0, 5, DistanceUnit.KILOMETERS), 0.01d));
+ assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d));
+ assertThat((Double)searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d));
}
protected void createShuffledJSONArray(XContentBuilder builder, GeoPoint[] pointsArray) throws IOException {
@@ -195,7 +194,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
* |______________________
* 1 2 3 4 5 6
*/
- Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
+ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point"));
XContentBuilder d1Builder = jsonBuilder();
@@ -236,19 +235,19 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d1", "d2");
- assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2.5, 1, 2, 1, DistanceUnit.KILOMETERS), 1.e-4));
- assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(4.5, 1, 2, 1, DistanceUnit.KILOMETERS), 1.e-4));
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1));
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC))
.execute().actionGet();
assertOrderedSearchHits(searchResponse, "d1", "d2");
- assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(3.25, 4, 2, 1, DistanceUnit.KILOMETERS), 1.e-4));
- assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(5.25, 4, 2, 1, DistanceUnit.KILOMETERS), 1.e-4));
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1));
}
@@ -264,7 +263,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC))
.execute().actionGet();
checkCorrectSortOrderForGeoSort(searchResponse);
@@ -272,7 +271,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC))
.execute().actionGet();
checkCorrectSortOrderForGeoSort(searchResponse);
@@ -280,7 +279,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
- .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC))
.execute().actionGet();
checkCorrectSortOrderForGeoSort(searchResponse);
@@ -288,36 +287,35 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
.prepareSearch()
.setSource(
new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0)
- .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE))).execute().actionGet();
+ )).execute().actionGet();
checkCorrectSortOrderForGeoSort(searchResponse);
searchResponse = client()
.prepareSearch()
.setSource(
new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0")
- .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE))).execute().actionGet();
+ )).execute().actionGet();
checkCorrectSortOrderForGeoSort(searchResponse);
searchResponse = client()
.prepareSearch()
.setSource(
new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0)
- .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE))).execute().actionGet();
+ )).execute().actionGet();
checkCorrectSortOrderForGeoSort(searchResponse);
searchResponse = client()
.prepareSearch()
.setSource(
new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0)
- .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE)
.validation(GeoValidationMethod.COERCE))).execute().actionGet();
checkCorrectSortOrderForGeoSort(searchResponse);
}
private static void checkCorrectSortOrderForGeoSort(SearchResponse searchResponse) {
assertOrderedSearchHits(searchResponse, "d2", "d1");
- assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 1, 2, DistanceUnit.KILOMETERS), 1.e-4));
- assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 1, 1, DistanceUnit.KILOMETERS), 1.e-4));
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 2, 1, 2, DistanceUnit.METERS), 1.e-1));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.SLOPPY_ARC.calculate(2, 2, 1, 1, DistanceUnit.METERS), 1.e-1));
}
protected void createQPoints(List<String> qHashes, List<GeoPoint> qPoints) {
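With the explicit .geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS) calls removed, these tests now pin down the defaults: sort values come back in meters computed along an arc, so the expectations switch to SLOPPY_ARC with a coarser tolerance. Checking one sort value in isolation:

    // Default geo-distance sort values are arc distances in meters.
    double expected = GeoDistance.SLOPPY_ARC.calculate(2, 2, 1, 2, DistanceUnit.METERS);
    assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0],
            closeTo(expected, 10d)); // tolerance absorbs the sloppy-arc error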
diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
index 4dd14cc523..6e3ac76e8a 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
@@ -20,8 +20,7 @@
package org.elasticsearch.search.sort;
-import org.apache.lucene.queryparser.xml.builders.MatchAllDocsQueryBuilder;
-import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.search.SortField;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseFieldMatcher;
@@ -33,11 +32,12 @@ import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.mapper.GeoPointFieldMapper;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.GeoValidationMethod;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.test.geo.RandomGeoGenerator;
@@ -110,7 +110,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
@Override
protected MappedFieldType provideMappedFieldType(String name) {
- MappedFieldType clone = GeoPointFieldMapper.Defaults.FIELD_TYPE.clone();
+ MappedFieldType clone = LatLonPointFieldMapper.Defaults.FIELD_TYPE.clone();
clone.setName(name);
return clone;
}
@@ -182,7 +182,6 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
@Override
protected void sortFieldAssertions(GeoDistanceSortBuilder builder, SortField sortField, DocValueFormat format) throws IOException {
- assertEquals(SortField.Type.CUSTOM, sortField.getType());
assertEquals(builder.order() == SortOrder.ASC ? false : true, sortField.getReverse());
assertEquals(builder.fieldName(), sortField.getField());
}
@@ -472,4 +471,35 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
protected GeoDistanceSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
return GeoDistanceSortBuilder.fromXContent(context, fieldName);
}
+
+ public void testCommonCaseIsOptimized() throws IOException {
+ // make sure the below tests test something...
+ assertFalse(SortField.class.equals(LatLonDocValuesField.newDistanceSort("random_field_name", 3.5, 2.1).getClass()));
+
+ QueryShardContext context = createMockShardContext();
+ // The common case should use LatLonDocValuesField.newDistanceSort
+ GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
+ SortFieldAndFormat sort = builder.build(context);
+ assertEquals(LatLonDocValuesField.newDistanceSort("random_field_name", 3.5, 2.1).getClass(), sort.field.getClass());
+
+ // however this might be disabled by fancy options
+ builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1), new GeoPoint(3.0, 4));
+ sort = builder.build(context);
+ assertEquals(SortField.class, sort.field.getClass()); // 2 points -> plain SortField with a custom comparator
+
+ builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
+ builder.unit(DistanceUnit.KILOMETERS);
+ sort = builder.build(context);
+ assertEquals(SortField.class, sort.field.getClass()); // km rather than m -> plain SortField with a custom comparator
+
+ builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
+ builder.order(SortOrder.DESC);
+ sort = builder.build(context);
+ assertEquals(SortField.class, sort.field.getClass()); // descending means the max value should be considered rather than min
+
+ builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
+ builder.setNestedPath("some_nested_path");
+ sort = builder.build(context);
+ assertEquals(SortField.class, sort.field.getClass()); // can't use LatLon optimized sorting with nested fields
+ }
}
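The new test encodes the optimization contract: a geo-distance sort on a single point, ascending, in meters, without a nested path, can be served by Lucene's doc-values distance sort; any deviation falls back to a generic SortField with a custom comparator. Stated as a sketch, with a hypothetical "location" field:

    // The optimized common case maps onto Lucene's doc-values distance sort.
    SortField fast = LatLonDocValuesField.newDistanceSort("location", 3.5, 2.1);
    // Multiple points, non-meter units, SortOrder.DESC, or a nested path all
    // disable the optimization and produce a plain SortField instead.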
diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
index 6a4cc61e04..e96b02c0e2 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
@@ -182,7 +182,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);
ScriptSortBuilder builder = ScriptSortBuilder.fromXContent(context, null);
assertEquals("doc['field_name'].value * factor", builder.script().getScript());
- assertNull(builder.script().getLang());
+ assertEquals(Script.DEFAULT_SCRIPT_LANG, builder.script().getLang());
assertEquals(1.1, builder.script().getParams().get("factor"));
assertEquals(ScriptType.INLINE, builder.script().getType());
assertEquals(ScriptSortType.NUMBER, builder.type());
@@ -208,7 +208,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT);
ScriptSortBuilder builder = ScriptSortBuilder.fromXContent(context, null);
assertEquals("doc['field_name'].value", builder.script().getScript());
- assertNull(builder.script().getLang());
+ assertEquals(Script.DEFAULT_SCRIPT_LANG, builder.script().getLang());
assertNull(builder.script().getParams());
assertEquals(ScriptType.INLINE, builder.script().getType());
assertEquals(ScriptSortType.NUMBER, builder.type());
diff --git a/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java b/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java
index 80cda7d7b5..24a82526ed 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java
@@ -25,7 +25,6 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
-import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.MockScriptPlugin;
@@ -251,9 +250,9 @@ public class SimpleSortIT extends ESIntegTestCase {
.setSource(jsonBuilder()
.startObject()
.field("ord", i)
- .field("svalue", new String[]{"" + i, "" + (i + 1), "" + (i + 2)})
- .field("lvalue", new long[]{i, i + 1, i + 2})
- .field("dvalue", new double[]{i, i + 1, i + 2})
+ .array("svalue", new String[]{"" + i, "" + (i + 1), "" + (i + 2)})
+ .array("lvalue", new long[]{i, i + 1, i + 2})
+ .array("dvalue", new double[]{i, i + 1, i + 2})
.startObject("gvalue")
.field("lat", (double) i + 1)
.field("lon", (double) i)
diff --git a/core/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/core/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java
new file mode 100644
index 0000000000..f1a0f7da45
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/source/MetadataFetchingIT.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.source;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.SearchContextException;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class MetadataFetchingIT extends ESIntegTestCase {
+ public void testSimple() {
+ assertAcked(prepareCreate("test"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
+ refresh();
+
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .storedFields("_none_")
+ .setFetchSource(false)
+ .get();
+ assertThat(response.getHits().getAt(0).getId(), nullValue());
+ assertThat(response.getHits().getAt(0).getType(), nullValue());
+ assertThat(response.getHits().getAt(0).sourceAsString(), nullValue());
+
+ response = client()
+ .prepareSearch("test")
+ .storedFields("_none_")
+ .get();
+ assertThat(response.getHits().getAt(0).getId(), nullValue());
+ assertThat(response.getHits().getAt(0).getType(), nullValue());
+ assertThat(response.getHits().getAt(0).sourceAsString(), nullValue());
+ }
+
+ public void testWithRouting() {
+ assertAcked(prepareCreate("test"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value").setRouting("toto").execute().actionGet();
+ refresh();
+
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .storedFields("_none_")
+ .setFetchSource(false)
+ .get();
+ assertThat(response.getHits().getAt(0).getId(), nullValue());
+ assertThat(response.getHits().getAt(0).getType(), nullValue());
+ assertThat(response.getHits().getAt(0).field("_routing"), nullValue());
+ assertThat(response.getHits().getAt(0).sourceAsString(), nullValue());
+
+ response = client()
+ .prepareSearch("test")
+ .storedFields("_none_")
+ .get();
+ assertThat(response.getHits().getAt(0).getId(), nullValue());
+ assertThat(response.getHits().getAt(0).getType(), nullValue());
+ assertThat(response.getHits().getAt(0).sourceAsString(), nullValue());
+ }
+
+ public void testInvalid() {
+ assertAcked(prepareCreate("test"));
+ ensureGreen();
+
+ index("test", "type1", "1", "field", "value");
+ refresh();
+
+ {
+ SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class,
+ () -> client().prepareSearch("test").setFetchSource(true).storedFields("_none_").get());
+ Throwable rootCause = ExceptionsHelper.unwrap(exc, SearchContextException.class);
+ assertNotNull(rootCause);
+ assertThat(rootCause.getClass(), equalTo(SearchContextException.class));
+ assertThat(rootCause.getMessage(),
+ equalTo("`stored_fields` cannot be disabled if _source is requested"));
+ }
+ {
+ SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class,
+ () -> client().prepareSearch("test").storedFields("_none_").setVersion(true).get());
+ Throwable rootCause = ExceptionsHelper.unwrap(exc, SearchContextException.class);
+ assertNotNull(rootCause);
+ assertThat(rootCause.getClass(), equalTo(SearchContextException.class));
+ assertThat(rootCause.getMessage(),
+ equalTo("`stored_fields` cannot be disabled if version is requested"));
+ }
+ {
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
+ () -> client().prepareSearch("test").storedFields("_none_", "field1").setVersion(true).get());
+ assertThat(exc.getMessage(),
+ equalTo("cannot combine _none_ with other fields"));
+ }
+ {
+ IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
+ () -> client().prepareSearch("test").storedFields("_none_").storedFields("field1").setVersion(true).get());
+ assertThat(exc.getMessage(),
+ equalTo("cannot combine _none_ with other fields"));
+ }
+ }
+}
+
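The new MetadataFetchingIT pins down the _none_ contract: disabling stored fields also suppresses hit metadata (_id, _type, _routing), it must be paired with setFetchSource(false) whenever _source would otherwise be fetched, and it cannot be combined with other field names or with setVersion(true). The minimal metadata-free fetch:

    // Metadata-free fetch: hits come back without _id, _type or _source.
    SearchResponse resp = client().prepareSearch("test")
            .storedFields("_none_")
            .setFetchSource(false)
            .get();
    assert resp.getHits().getAt(0).getId() == null;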
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java
index 49ad540d95..172183c57c 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java
@@ -99,7 +99,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase {
client().prepareIndex(INDEX, TYPE, "" + i)
.setSource(jsonBuilder()
.startObject().startObject(FIELD)
- .field("input", input[i])
+ .array("input", input[i])
.endObject()
.endObject()
)
@@ -941,7 +941,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase {
builders[i] = client().prepareIndex(INDEX, TYPE, "" + i)
.setSource(jsonBuilder()
.startObject().startObject(FIELD)
- .field("input", input[i])
+ .array("input", input[i])
.field("output", surface[i])
.startObject("payload").field("id", i).endObject()
.field("weight", 1) // WE FORCEFULLY INDEX A BOGUS WEIGHT
@@ -955,7 +955,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase {
builders[i] = client().prepareIndex(INDEX, TYPE, "n" + i)
.setSource(jsonBuilder()
.startObject().startObject(FIELD)
- .field("input", input[i])
+ .array("input", input[i])
.field("output", surface[i])
.startObject("payload").field("id", i).endObject()
.field("weight", weight[i])
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java
index 9fae823c43..fa94eabeb5 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java
@@ -22,6 +22,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.geo.GeoPoint;
@@ -29,6 +30,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.suggest.CompletionSuggestSearchIT.CompletionMappingBuilder;
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
import org.elasticsearch.search.suggest.completion.context.CategoryContextMapping;
@@ -51,6 +53,8 @@ import java.util.Map;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.core.IsEqual.equalTo;
@SuppressCodecs("*") // requires custom completion format
public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
@@ -157,6 +161,27 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
assertSuggestions("foo", prefix, "sugxgestion9", "sugxgestion8", "sugxgestion7", "sugxgestion6", "sugxgestion5");
}
+ public void testContextFilteringWorksWithUTF8Categories() throws Exception {
+ CategoryContextMapping contextMapping = ContextBuilder.category("cat").field("cat").build();
+ LinkedHashMap<String, ContextMapping> map = new LinkedHashMap<>(Collections.singletonMap("cat", contextMapping));
+ final CompletionMappingBuilder mapping = new CompletionMappingBuilder().context(map);
+ createIndexAndMapping(mapping);
+ IndexResponse indexResponse = client().prepareIndex(INDEX, TYPE, "1")
+ .setSource(jsonBuilder().startObject()
+ .startObject(FIELD)
+ .field("input", "suggestion")
+ .endObject()
+ .field("cat", "ctx\\u00e4")
+ .endObject())
+ .get();
+ assertThat(indexResponse.status(), equalTo(RestStatus.CREATED));
+ assertNoFailures(client().admin().indices().prepareRefresh(INDEX).get());
+ CompletionSuggestionBuilder contextSuggestQuery = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg")
+ .contexts(Collections.singletonMap("cat",
+ Collections.singletonList(CategoryQueryContext.builder().setCategory("ctx\\u00e4").build())));
+ assertSuggestions("foo", contextSuggestQuery, "suggestion");
+ }
+
public void testSingleContextFiltering() throws Exception {
CategoryContextMapping contextMapping = ContextBuilder.category("cat").field("cat").build();
LinkedHashMap<String, ContextMapping> map = new LinkedHashMap<String, ContextMapping>(Collections.singletonMap("cat", contextMapping));
@@ -557,7 +582,8 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
}
public void testGeoField() throws Exception {
-
+// Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
+// Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
XContentBuilder mapping = jsonBuilder();
mapping.startObject();
mapping.startObject(TYPE);
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearch2xIT.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearch2xIT.java
index a3d72f671f..50733f1083 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearch2xIT.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearch2xIT.java
@@ -190,7 +190,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
.startObject("context")
.startObject("location")
.field("type", "geo")
- .field("precision", precision)
+ .array("precision", precision)
.endObject()
.endObject()
.endObject().endObject()
@@ -209,7 +209,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
.startObject("context")
.startObject("location")
.field("type", "geo")
- .field("precision", precision)
+ .array("precision", precision)
.endObject()
.endObject()
.endObject().endObject()
@@ -314,7 +314,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
{ "pizza - treptow", "pizza", "food" } };
for (int i = 0; i < locations.length; i++) {
- XContentBuilder source = jsonBuilder().startObject().startObject(FIELD).field("input", input[i])
+ XContentBuilder source = jsonBuilder().startObject().startObject(FIELD).array("input", input[i])
.startObject("context").field("st", locations[i]).endObject().field("payload", locations[i])
.endObject().endObject();
client().prepareIndex(INDEX, TYPE, "" + i).setSource(source).execute().actionGet();
@@ -343,7 +343,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
.addMapping(TYPE, createMapping(TYPE, ContextBuilder.category("st"))));
for (int i = 0; i < HEROS.length; i++) {
- XContentBuilder source = jsonBuilder().startObject().startObject(FIELD).field("input", HEROS[i])
+ XContentBuilder source = jsonBuilder().startObject().startObject(FIELD).array("input", HEROS[i])
.startObject("context").field("st", i%3).endObject()
.startObject("payload").field("group", i % 3).field("id", i).endObject()
.endObject().endObject();
@@ -376,12 +376,12 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
XContentBuilder doc1 = jsonBuilder();
doc1.startObject().startObject("suggest_field")
.field("input", "backpack_red")
- .startObject("context").field("color", "red", "all_colors").endObject()
+ .startObject("context").array("color", "red", "all_colors").endObject()
.endObject().endObject();
XContentBuilder doc2 = jsonBuilder();
doc2.startObject().startObject("suggest_field")
.field("input", "backpack_green")
- .startObject("context").field("color", "green", "all_colors").endObject()
+ .startObject("context").array("color", "green", "all_colors").endObject()
.endObject().endObject();
client().prepareIndex(INDEX, TYPE, "1")
@@ -451,7 +451,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
client().prepareIndex(INDEX, TYPE, "" + i)
.setSource(
jsonBuilder().startObject().field("category", Integer.toString(i % 3)).startObject(FIELD)
- .field("input", HEROS[i])
+ .array("input", HEROS[i])
.startObject("context").endObject().field("payload", Integer.toString(i % 3))
.endObject().endObject()).execute().actionGet();
}
@@ -508,7 +508,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
client().prepareIndex(INDEX, TYPE, "" + i)
.setSource(
jsonBuilder().startObject().startArray("category").value(Integer.toString(i % 3)).value("other").endArray()
- .startObject(FIELD).field("input", HEROS[i]).startObject("context").endObject()
+ .startObject(FIELD).array("input", HEROS[i]).startObject("context").endObject()
.field("payload", Integer.toString(i % 3)).endObject().endObject()).execute().actionGet();
}
@@ -535,7 +535,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
client().prepareIndex(INDEX, TYPE, "" + i)
.setSource(
jsonBuilder().startObject().field("categoryA").value("" + (char) ('0' + (i % 3))).field("categoryB")
- .value("" + (char) ('A' + (i % 3))).startObject(FIELD).field("input", HEROS[i])
+ .value("" + (char) ('A' + (i % 3))).startObject(FIELD).array("input", HEROS[i])
.startObject("context").endObject().field("payload", ((char) ('0' + (i % 3))) + "" + (char) ('A' + (i % 3)))
.endObject().endObject()).execute().actionGet();
}
@@ -561,7 +561,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
for (int i = 0; i < HEROS.length; i++) {
String source = jsonBuilder().startObject().field("categoryA", "" + (char) ('0' + (i % 3)))
- .field("categoryB", "" + (char) ('a' + (i % 3))).startObject(FIELD).field("input", HEROS[i])
+ .field("categoryB", "" + (char) ('a' + (i % 3))).startObject(FIELD).array("input", HEROS[i])
.startObject("context").endObject().startObject("payload").field("categoryA", "" + (char) ('0' + (i % 3)))
.field("categoryB", "" + (char) ('a' + (i % 3))).endObject().endObject().endObject().string();
client().prepareIndex(INDEX, TYPE, "" + i).setSource(source).execute().actionGet();
@@ -599,7 +599,7 @@ public class ContextSuggestSearch2xIT extends ESIntegTestCase {
String type = types[i % types.length];
client().prepareIndex(INDEX, type, "" + i)
.setSource(
- jsonBuilder().startObject().startObject(FIELD).field("input", HEROS[i])
+ jsonBuilder().startObject().startObject(FIELD).array("input", HEROS[i])
.startObject("context").endObject().field("payload", type).endObject().endObject()).execute().actionGet();
}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java
index de912bb963..fd7a33ee5b 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java
@@ -77,7 +77,7 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase {
.field("weight", 4)
.endObject()
.startObject()
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.field("weight", 5)
.endObject()
.endArray()
@@ -107,7 +107,7 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase {
.startObject()
.startArray("completion")
.startObject()
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.startObject("contexts")
.field("ctx", "ctx1")
.endObject()
@@ -139,7 +139,7 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase {
ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder()
.startObject()
.startObject("completion")
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.startObject("contexts")
.array("ctx", "ctx1", "ctx2", "ctx3")
.endObject()
@@ -175,7 +175,7 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase {
.startObject()
.startArray("completion")
.startObject()
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.field("weight", 5)
.startObject("contexts")
.array("ctx", "ctx1", "ctx2", "ctx3")
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java
index 33624ed800..2237c1a41d 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java
@@ -77,7 +77,7 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase {
.field("weight", 4)
.endObject()
.startObject()
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.field("weight", 5)
.endObject()
.endArray()
@@ -108,7 +108,7 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase {
.startObject()
.startArray("completion")
.startObject()
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.startObject("contexts")
.startObject("ctx")
.field("lat", 43.6624803)
@@ -143,7 +143,7 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase {
ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder()
.startObject()
.startObject("completion")
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.startObject("contexts")
.startArray("ctx")
.startObject()
@@ -188,7 +188,7 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase {
.startObject()
.startArray("completion")
.startObject()
- .field("input", "suggestion5", "suggestion6", "suggestion7")
+ .array("input", "suggestion5", "suggestion6", "suggestion7")
.field("weight", 5)
.startObject("contexts")
.array("loc1", "ezs42e44yx96")
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
index 0640bf0d6f..d66fd8596b 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
@@ -19,9 +19,9 @@
package org.elasticsearch.search.suggest.phrase;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
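The import swap above tracks Lucene's relocation of LowerCaseFilter out of org.apache.lucene.analysis.core into org.apache.lucene.analysis. A minimal sketch of an analyzer built against the new location (class name hypothetical):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.LowerCaseFilter;   // new home, no longer in .core
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;

    public class LowercasingAnalyzer extends Analyzer {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer source = new WhitespaceTokenizer();
            return new TokenStreamComponents(source, new LowerCaseFilter(source));
        }
    }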
diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
index c71bfa0c1e..5562e2d402 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.snapshots;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -281,7 +283,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
@Override
public void onFailure(String source, Exception e) {
- logger.warn("failed to execute [{}]", e, source);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", source), e);
}
});
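The replacement line is the Log4j 2 idiom for parameterized warnings that carry an exception: the ParameterizedMessage is wrapped in a Supplier so it is only built when WARN is actually enabled, and the Throwable is passed last so the stack trace is kept. The pattern in isolation, as a hedged sketch:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    void logFailure(Logger logger, String source, Exception e) {
        // The cast selects the warn(Supplier<?>, Throwable) overload; message
        // construction is deferred until the level check passes.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", source), e);
    }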
diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index f512f1da53..3a045c80ac 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -44,8 +44,8 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.ttl.IndicesTTLService;
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index 54a0539192..3e43cc8304 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -1504,12 +1504,24 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
logger.info("--> checking that _current no longer returns the snapshot");
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").execute().actionGet().getSnapshots().isEmpty(), equalTo(true));
- try {
- client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist").execute().actionGet();
- fail();
- } catch (SnapshotMissingException ex) {
- // Expected
- }
+ // test that getting an unavailable snapshot status throws an exception if ignoreUnavailable is false on the request
+ SnapshotMissingException ex = expectThrows(SnapshotMissingException.class, () ->
+ client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist").get());
+ assertEquals("[test-repo:test-snap-doesnt-exist] is missing", ex.getMessage());
+ // test that getting an unavailable snapshot status does not throw an exception if ignoreUnavailable is true on the request
+ response = client.admin().cluster().prepareSnapshotStatus("test-repo")
+ .addSnapshots("test-snap-doesnt-exist")
+ .setIgnoreUnavailable(true)
+ .get();
+ assertTrue(response.getSnapshots().isEmpty());
+ // test getting snapshot status for available and unavailable snapshots where ignoreUnavailable is true
+ // (available one should be returned)
+ response = client.admin().cluster().prepareSnapshotStatus("test-repo")
+ .addSnapshots("test-snap", "test-snap-doesnt-exist")
+ .setIgnoreUnavailable(true)
+ .get();
+ assertEquals(1, response.getSnapshots().size());
+ assertEquals("test-snap", response.getSnapshots().get(0).getSnapshot().getSnapshotId().getName());
}
public void testSnapshotRelocatingPrimary() throws Exception {
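The hunk above also shows the test-style migration used throughout this merge: try/fail/catch blocks become expectThrows, which asserts the exception type and returns the instance for further checks. A reduced before/after sketch (doSomethingThatShouldFail is a hypothetical placeholder):

    // Before: manual control flow, and forgetting fail() silently weakens the test.
    try {
        doSomethingThatShouldFail();
        fail("expected a SnapshotMissingException");
    } catch (SnapshotMissingException expected) {
        // expected
    }

    // After: one expression, with the exception available for message assertions.
    SnapshotMissingException ex = expectThrows(SnapshotMissingException.class,
            () -> doSomethingThatShouldFail());
    assertEquals("[test-repo:test-snap-doesnt-exist] is missing", ex.getMessage());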
diff --git a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java
index 8f10ccd653..4297710505 100644
--- a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java
+++ b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java
@@ -18,9 +18,10 @@
*/
package org.elasticsearch.test;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.spi.LoggingEvent;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.filter.RegexFilter;
import org.elasticsearch.common.regex.Regex;
import java.util.ArrayList;
@@ -32,13 +33,14 @@ import static org.hamcrest.MatcherAssert.assertThat;
/**
* Test appender that can be used to verify that certain events were logged correctly
*/
-public class MockLogAppender extends AppenderSkeleton {
+public class MockLogAppender extends AbstractAppender {
private static final String COMMON_PREFIX = System.getProperty("es.logger.prefix", "org.elasticsearch.");
private List<LoggingExpectation> expectations;
- public MockLogAppender() {
+ public MockLogAppender() throws IllegalAccessException {
+ super("mock", RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
expectations = new ArrayList<>();
}
@@ -47,22 +49,12 @@ public class MockLogAppender extends AppenderSkeleton {
}
@Override
- protected void append(LoggingEvent loggingEvent) {
+ public void append(LogEvent event) {
for (LoggingExpectation expectation : expectations) {
- expectation.match(loggingEvent);
+ expectation.match(event);
}
}
- @Override
- public void close() {
-
- }
-
- @Override
- public boolean requiresLayout() {
- return false;
- }
-
public void assertAllExpectationsMatched() {
for (LoggingExpectation expectation : expectations) {
expectation.assertMatched();
@@ -70,7 +62,7 @@ public class MockLogAppender extends AppenderSkeleton {
}
public interface LoggingExpectation {
- void match(LoggingEvent loggingEvent);
+ void match(LogEvent event);
void assertMatched();
}
@@ -91,10 +83,10 @@ public class MockLogAppender extends AppenderSkeleton {
}
@Override
- public void match(LoggingEvent event) {
- if (event.getLevel() == level && event.getLoggerName().equals(logger)) {
+ public void match(LogEvent event) {
+ if (event.getLevel().equals(level) && event.getLoggerName().equals(logger)) {
if (Regex.isSimpleMatchPattern(message)) {
- if (Regex.simpleMatch(message, event.getMessage().toString())) {
+ if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) {
saw = true;
}
} else {
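The appender rewrite follows the standard Log4j 1 to Log4j 2 mapping: AppenderSkeleton becomes AbstractAppender, LoggingEvent becomes LogEvent, the close()/requiresLayout() overrides disappear, and message text is read via getMessage().getFormattedMessage(). A stripped-down sketch of a Log4j 2 test appender along the same lines (class name hypothetical):

    import org.apache.logging.log4j.core.LogEvent;
    import org.apache.logging.log4j.core.appender.AbstractAppender;
    import org.apache.logging.log4j.core.filter.RegexFilter;

    import java.util.ArrayList;
    import java.util.List;

    class CapturingAppender extends AbstractAppender {
        private final List<String> messages = new ArrayList<>();

        CapturingAppender() throws IllegalAccessException {
            // RegexFilter.createFilter declares IllegalAccessException, hence the throws clause.
            super("capture", RegexFilter.createFilter(".*", new String[0], false, null, null), null);
        }

        @Override
        public void append(LogEvent event) {
            // Log4j 2 exposes the fully formatted message, not the raw pattern.
            messages.add(event.getMessage().getFormattedMessage());
        }
    }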
diff --git a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java
index 974929dddf..56e18d5335 100644
--- a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java
+++ b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java
@@ -76,11 +76,11 @@ public class SimpleThreadPoolIT extends ESIntegTestCase {
builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
.startObject()
.field("str_value", "s" + i)
- .field("str_values", new String[]{"s" + (i * 2), "s" + (i * 2 + 1)})
+ .array("str_values", new String[]{"s" + (i * 2), "s" + (i * 2 + 1)})
.field("l_value", i)
- .field("l_values", new int[]{i * 2, i * 2 + 1})
+ .array("l_values", new int[]{i * 2, i * 2 + 1})
.field("d_value", i)
- .field("d_values", new double[]{i * 2, i * 2 + 1})
+ .array("d_values", new double[]{i * 2, i * 2 + 1})
.endObject());
}
indexRandom(true, builders);
@@ -109,7 +109,7 @@ public class SimpleThreadPoolIT extends ESIntegTestCase {
String nodePrefix = "(" + Pattern.quote(InternalTestCluster.TRANSPORT_CLIENT_PREFIX) + ")?(" +
Pattern.quote(ESIntegTestCase.SUITE_CLUSTER_NODE_PREFIX) + "|" +
Pattern.quote(ESIntegTestCase.TEST_CLUSTER_NODE_PREFIX) + "|" +
- Pattern.quote(TribeIT.SECOND_CLUSTER_NODE_PREFIX) + ")";
+ Pattern.quote("node_tribe2") + ")";
assertThat(threadName, RegexMatcher.matches("\\[" + nodePrefix + "\\d+\\]"));
}
}
diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java
index 14cf10b8f3..bf0f5f6e60 100644
--- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java
+++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java
@@ -40,11 +40,8 @@ import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
-/**
- *
- */
public class ThreadPoolSerializationTests extends ESTestCase {
- BytesStreamOutput output = new BytesStreamOutput();
+ private final BytesStreamOutput output = new BytesStreamOutput();
private ThreadPool.ThreadPoolType threadPoolType;
@Before
@@ -54,13 +51,13 @@ public class ThreadPoolSerializationTests extends ESTestCase {
}
public void testThatQueueSizeSerializationWorks() throws Exception {
- ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("10k"));
+ ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10,
+ TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("10k"));
output.setVersion(Version.CURRENT);
info.writeTo(output);
StreamInput input = output.bytes().streamInput();
- ThreadPool.Info newInfo = new ThreadPool.Info();
- newInfo.readFrom(input);
+ ThreadPool.Info newInfo = new ThreadPool.Info(input);
assertThat(newInfo.getQueueSize().singles(), is(10000L));
}
@@ -71,8 +68,7 @@ public class ThreadPoolSerializationTests extends ESTestCase {
info.writeTo(output);
StreamInput input = output.bytes().streamInput();
- ThreadPool.Info newInfo = new ThreadPool.Info();
- newInfo.readFrom(input);
+ ThreadPool.Info newInfo = new ThreadPool.Info(input);
assertThat(newInfo.getQueueSize(), is(nullValue()));
}
@@ -103,7 +99,8 @@ public class ThreadPoolSerializationTests extends ESTestCase {
}
public void testThatToXContentWritesInteger() throws Exception {
- ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("1k"));
+ ThreadPool.Info info = new ThreadPool.Info("foo", threadPoolType, 1, 10,
+ TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("1k"));
XContentBuilder builder = jsonBuilder();
builder.startObject();
info.toXContent(builder, ToXContent.EMPTY_PARAMS);
@@ -126,8 +123,7 @@ public class ThreadPoolSerializationTests extends ESTestCase {
info.writeTo(output);
StreamInput input = output.bytes().streamInput();
- ThreadPool.Info newInfo = new ThreadPool.Info();
- newInfo.readFrom(input);
+ ThreadPool.Info newInfo = new ThreadPool.Info(input);
assertThat(newInfo.getThreadPoolType(), is(threadPoolType));
}
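The serialization changes in this file swap the old Streamable idiom (default constructor plus readFrom) for the Writeable one, where deserialization happens in a constructor taking a StreamInput and fields can therefore be final. A hedged sketch of the round trip under that pattern (class and fields hypothetical):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    class Info implements Writeable {
        private final String name;
        private final int queueSize;

        Info(String name, int queueSize) {
            this.name = name;
            this.queueSize = queueSize;
        }

        // Deserializing constructor replaces readFrom(); reads must mirror writeTo's order.
        Info(StreamInput in) throws IOException {
            this.name = in.readString();
            this.queueSize = in.readVInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
            out.writeVInt(queueSize);
        }
    }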
diff --git a/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java b/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java
index 20f598a930..2fb6333587 100644
--- a/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java
+++ b/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java
@@ -68,43 +68,34 @@ public class SimpleTimestampIT extends ESIntegTestCase {
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
long now2 = System.currentTimeMillis();
- // we check both realtime get and non realtime get
- GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
- long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
- assertThat(timestamp, greaterThanOrEqualTo(now1));
- assertThat(timestamp, lessThanOrEqualTo(now2));
- // verify its the same timestamp when going the replica
- getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
- assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
-
// non realtime get (stored)
- getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
- timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_timestamp").setRealtime(randomBoolean()).execute().actionGet();
+ long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
assertThat(timestamp, greaterThanOrEqualTo(now1));
assertThat(timestamp, lessThanOrEqualTo(now2));
// verify it's the same timestamp when going to the replica
- getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_timestamp").setRealtime(randomBoolean()).execute().actionGet();
assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
logger.info("--> check with custom timestamp (numeric)");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("10").setRefreshPolicy(IMMEDIATE).get();
- getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_timestamp").setRealtime(false).execute().actionGet();
timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
assertThat(timestamp, equalTo(10L));
// verify it's the same timestamp when going to the replica
- getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_timestamp").setRealtime(false).execute().actionGet();
assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
logger.info("--> check with custom timestamp (string)");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("1970-01-01T00:00:00.020")
.setRefreshPolicy(IMMEDIATE).get();
- getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_timestamp").setRealtime(false).execute().actionGet();
timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
assertThat(timestamp, equalTo(20L));
// verify it's the same timestamp when going to the replica
- getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_timestamp").setRealtime(false).execute().actionGet();
assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
}
diff --git a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java
deleted file mode 100644
index e5c734cbfb..0000000000
--- a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.transport;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.ModuleTestCase;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.indices.breaker.CircuitBreakerService;
-import org.elasticsearch.test.transport.AssertingLocalTransport;
-import org.elasticsearch.threadpool.ThreadPool;
-
-/** Unit tests for module registering custom transport and transport service */
-public class TransportModuleTests extends ModuleTestCase {
-
-
-
- static class FakeTransport extends AssertingLocalTransport {
- @Inject
- public FakeTransport(Settings settings, CircuitBreakerService circuitBreakerService, ThreadPool threadPool,
- NamedWriteableRegistry namedWriteableRegistry) {
- super(settings, circuitBreakerService, threadPool, namedWriteableRegistry);
- }
- }
-
- static class FakeTransportService extends TransportService {
- @Inject
- public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
- super(settings, transport, threadPool);
- }
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java
index cfc4de6b3c..531c06f5be 100644
--- a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java
+++ b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java
@@ -65,7 +65,8 @@ public class TransportServiceHandshakeTests extends ESTestCase {
new NoneCircuitBreakerService(),
new NamedWriteableRegistry(Collections.emptyList()),
new NetworkService(settings, Collections.emptyList()));
- TransportService transportService = new MockTransportService(settings, transport, threadPool);
+ TransportService transportService = new MockTransportService(settings, transport, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
DiscoveryNode node =
diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java
index 01e6a490da..440859dce4 100644
--- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java
+++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java
@@ -19,434 +19,492 @@
package org.elasticsearch.tribe;
-import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
-import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.client.Client;
-import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
-import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.NodeConfigurationSource;
-import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.transport.Transport;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
-import java.util.Map;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Consumer;
import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+import static java.util.stream.Collectors.toSet;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.core.Is.is;
/**
* Note: when talking to the tribe client there is no need to set the local flag on master read
* operations; it does so by default.
*/
-@LuceneTestCase.SuppressFileSystems("ExtrasFS") // doesn't work with potential multi data path from test cluster yet
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
public class TribeIT extends ESIntegTestCase {
- public static final String SECOND_CLUSTER_NODE_PREFIX = "node_tribe2";
+ private static final String TRIBE_NODE = "tribe_node";
+ private static InternalTestCluster cluster1;
private static InternalTestCluster cluster2;
- private Node tribeNode;
- private Client tribeClient;
+ /**
+ * A predicate that is used to select none of the remote clusters
+ **/
+ private static final Predicate<InternalTestCluster> NONE = c -> false;
+
+ /**
+ * A predicate that is used to select the remote cluster 1 only
+ **/
+ private static final Predicate<InternalTestCluster> CLUSTER1_ONLY = c -> c.getClusterName().equals(cluster1.getClusterName());
+
+ /**
+ * A predicate that is used to select the remote cluster 2 only
+ **/
+ private static final Predicate<InternalTestCluster> CLUSTER2_ONLY = c -> c.getClusterName().equals(cluster2.getClusterName());
+
+ /**
+ * A predicate that is used to select the two remote clusters
+ **/
+ private static final Predicate<InternalTestCluster> ALL = c -> true;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ // Required to delete _all indices on remote clusters
+ .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false)
+ .build();
+ }
+
+ @Override
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return getMockPlugins();
+ }
@Before
- public void setupSecondCluster() throws Exception {
+ public void startRemoteClusters() {
+ final int minNumDataNodes = 2;
+ final int maxNumDataNodes = 4;
+ final NodeConfigurationSource nodeConfigurationSource = getNodeConfigSource();
+ final Collection<Class<? extends Plugin>> plugins = nodePlugins();
+
+ if (cluster1 == null) {
+ cluster1 = new InternalTestCluster(randomLong(), createTempDir(), true, minNumDataNodes, maxNumDataNodes,
+ UUIDs.randomBase64UUID(random()), nodeConfigurationSource, 0, false, "cluster_1",
+ plugins, Function.identity());
+ }
+
if (cluster2 == null) {
- final NodeConfigurationSource configSource = getNodeConfigSource();
- cluster2 = new InternalTestCluster(randomLong(), createTempDir(), true, 2, 2,
- UUIDs.randomBase64UUID(random()), configSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(),
- Function.identity());
- cluster2.beforeTest(random(), 0.1);
- cluster2.ensureAtLeastNumDataNodes(2);
+ cluster2 = new InternalTestCluster(randomLong(), createTempDir(), true, minNumDataNodes, maxNumDataNodes,
+ UUIDs.randomBase64UUID(random()), nodeConfigurationSource, 0, false, "cluster_2",
+ plugins, Function.identity());
}
- }
- @AfterClass
- public static void tearDownSecondCluster() {
- if (cluster2 != null) {
+ doWithAllClusters(c -> {
try {
- cluster2.close();
- } finally {
- cluster2 = null;
+ c.beforeTest(random(), 0.1);
+ c.ensureAtLeastNumDataNodes(minNumDataNodes);
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to set up remote cluster [" + c.getClusterName() + "]", e);
}
- }
+ });
}
@After
- public void tearDownTribeNode() throws IOException {
- if (cluster2 != null) {
+ public void wipeRemoteClusters() {
+ doWithAllClusters(c -> {
+ final String clusterName = c.getClusterName();
try {
- cluster2.wipe(Collections.<String>emptySet());
- } finally {
- cluster2.afterTest();
+ c.client().admin().indices().prepareDelete(MetaData.ALL).get();
+ c.afterTest();
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to clean up remote cluster [" + clusterName + "]", e);
}
- }
- if (tribeNode != null) {
- tribeNode.close();
- tribeNode = null;
- }
+ });
}
- private void setupTribeNode(Settings settings) {
- Map<String,String> asMap = internalCluster().getDefaultSettings().getAsMap();
- Settings.Builder tribe1Defaults = Settings.builder();
- Settings.Builder tribe2Defaults = Settings.builder();
- for (Map.Entry<String, String> entry : asMap.entrySet()) {
- if (entry.getKey().startsWith("path.")) {
- continue;
- }
- tribe1Defaults.put("tribe.t1." + entry.getKey(), entry.getValue());
- tribe2Defaults.put("tribe.t2." + entry.getKey(), entry.getValue());
+ @AfterClass
+ public static void stopRemoteClusters() {
+ try {
+ doWithAllClusters(InternalTestCluster::close);
+ } finally {
+ cluster1 = null;
+ cluster2 = null;
}
- // give each tribe it's unicast hosts to connect to
- tribe1Defaults.putArray("tribe.t1." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(internalCluster().client()));
- tribe1Defaults.putArray("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(cluster2.client()));
-
- Settings merged = Settings.builder()
- .put(internalCluster().getDefaultSettings())
- .put("tribe.t1.cluster.name", internalCluster().getClusterName())
- .put("tribe.t2.cluster.name", cluster2.getClusterName())
- .put("tribe.t1.transport.type", "local")
- .put("tribe.t2.transport.type", "local")
- .put("tribe.t1.discovery.type", "local")
- .put("tribe.t2.discovery.type", "local")
- .put("transport.type", "local")
- .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
- .put("tribe.blocks.write", false)
- .put(NetworkModule.HTTP_ENABLED.getKey(), false)
- .put(settings)
-
- .put(tribe1Defaults.build())
- .put(tribe2Defaults.build())
- .put("node.name", "tribe_node") // make sure we can identify threads from this node
- .build();
+ }
- tribeNode = new Node(merged).start();
- tribeClient = tribeNode.client();
+ private Releasable startTribeNode() throws Exception {
+ return startTribeNode(ALL, Settings.EMPTY);
}
- public void testGlobalReadWriteBlocks() throws Exception {
- logger.info("create 2 indices, test1 on t1, and test2 on t2");
- internalCluster().client().admin().indices().prepareCreate("test1").get();
- assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ private Releasable startTribeNode(Predicate<InternalTestCluster> filter, Settings settings) throws Exception {
+ final String node = internalCluster().startNode(createTribeSettings(filter).put(settings).build());
+ return () -> {
+ try {
+ while (internalCluster().getNodeNames().length > 0) {
+ internalCluster().stopRandomNode(s -> true);
+ }
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to close tribe node [" + node + "]", e);
+ }
+ };
+ }
+ private Settings.Builder createTribeSettings(Predicate<InternalTestCluster> filter) {
+ assertNotNull(filter);
+
+ final Settings.Builder settings = Settings.builder();
+ settings.put(Node.NODE_NAME_SETTING.getKey(), TRIBE_NODE);
+ settings.put(Node.NODE_DATA_SETTING.getKey(), false);
+ settings.put(Node.NODE_MASTER_SETTING.getKey(), true);
+ settings.put(NetworkModule.HTTP_ENABLED.getKey(), false);
+ settings.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT);
+ settings.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT);
+
+ doWithAllClusters(filter, c -> {
+ String tribeSetting = "tribe." + c.getClusterName() + ".";
+ settings.put(tribeSetting + ClusterName.CLUSTER_NAME_SETTING.getKey(), c.getClusterName());
+ settings.put(tribeSetting + DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "100ms");
+ settings.put(tribeSetting + NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT);
+ settings.put(tribeSetting + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT);
+
+ Set<String> hosts = new HashSet<>();
+ for (Transport transport : c.getInstances(Transport.class)) {
+ TransportAddress address = transport.boundAddress().publishAddress();
+ hosts.add(address.getHost() + ":" + address.getPort());
+ }
+ settings.putArray(tribeSetting + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(),
+ hosts.toArray(new String[hosts.size()]));
+ });
- setupTribeNode(Settings.builder()
+ return settings;
+ }
+
+ public void testGlobalReadWriteBlocks() throws Exception {
+ Settings additionalSettings = Settings.builder()
.put("tribe.blocks.write", true)
.put("tribe.blocks.metadata", true)
- .build());
+ .build();
- logger.info("wait till tribe has the same nodes as the 2 clusters");
- awaitSameNodeCounts();
- // wait till the tribe node connected to the cluster, by checking if the index exists in the cluster state
- logger.info("wait till test1 and test2 exists in the tribe node state");
- awaitIndicesInClusterState("test1", "test2");
+ try (Releasable tribeNode = startTribeNode(ALL, additionalSettings)) {
+ // Creates 2 indices, test1 on cluster1 and test2 on cluster2
+ assertAcked(cluster1.client().admin().indices().prepareCreate("test1"));
+ ensureGreen(cluster1.client());
- try {
- tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").execute().actionGet();
- fail("cluster block should be thrown");
- } catch (ClusterBlockException e) {
- // all is well!
- }
- try {
- tribeClient.admin().indices().prepareForceMerge("test1").execute().actionGet();
- fail("cluster block should be thrown");
- } catch (ClusterBlockException e) {
- // all is well!
- }
- try {
- tribeClient.admin().indices().prepareForceMerge("test2").execute().actionGet();
- fail("cluster block should be thrown");
- } catch (ClusterBlockException e) {
- // all is well!
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ ensureGreen(cluster2.client());
+
+ // Wait for the tribe node to connect to the two remote clusters
+ assertNodes(ALL);
+
+ // Wait for the tribe node to retrieve the indices into its cluster state
+ assertIndicesExist(client(), "test1", "test2");
+
+ // Writes not allowed through the tribe node
+ ClusterBlockException e = expectThrows(ClusterBlockException.class, () -> {
+ client().prepareIndex("test1", "type1").setSource("field", "value").get();
+ });
+ assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/11/tribe node, write not allowed]"));
+
+ e = expectThrows(ClusterBlockException.class, () -> client().prepareIndex("test2", "type2").setSource("field", "value").get());
+ assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/11/tribe node, write not allowed]"));
+
+ e = expectThrows(ClusterBlockException.class, () -> client().admin().indices().prepareForceMerge("test1").get());
+ assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/10/tribe node, metadata not allowed]"));
+
+ e = expectThrows(ClusterBlockException.class, () -> client().admin().indices().prepareForceMerge("test2").get());
+ assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/10/tribe node, metadata not allowed]"));
}
}
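Note how startTribeNode returns a Releasable whose close() stops the node, letting each test scope the tribe node with try-with-resources instead of an @After hook. The shape of that pattern, reduced to a sketch (startNodeSomehow and stopNode are hypothetical placeholders):

    import org.elasticsearch.common.lease.Releasable;

    Releasable startDisposableNode() {
        final String node = startNodeSomehow();
        // Releasable narrows Closeable.close() to throw no checked exception,
        // so the lambda slots directly into try-with-resources.
        return () -> stopNode(node);
    }

    // usage: teardown runs even if an assertion inside the block fails
    try (Releasable node = startDisposableNode()) {
        // exercise the node
    }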
public void testIndexWriteBlocks() throws Exception {
- logger.info("create 2 indices, test1 on t1, and test2 on t2");
- assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
- assertAcked(internalCluster().client().admin().indices().prepareCreate("block_test1"));
- assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
- assertAcked(cluster2.client().admin().indices().prepareCreate("block_test2"));
-
- setupTribeNode(Settings.builder()
+ Settings additionalSettings = Settings.builder()
.put("tribe.blocks.write.indices", "block_*")
- .build());
- logger.info("wait till tribe has the same nodes as the 2 clusters");
- awaitSameNodeCounts();
- // wait till the tribe node connected to the cluster, by checking if the index exists in the cluster state
- logger.info("wait till test1 and test2 exists in the tribe node state");
- awaitIndicesInClusterState("test1", "test2", "block_test1", "block_test2");
-
- tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").get();
- try {
- tribeClient.prepareIndex("block_test1", "type1", "1").setSource("field1", "value1").get();
- fail("cluster block should be thrown");
- } catch (ClusterBlockException e) {
- // all is well!
- }
+ .build();
- tribeClient.prepareIndex("test2", "type1", "1").setSource("field1", "value1").get();
- try {
- tribeClient.prepareIndex("block_test2", "type1", "1").setSource("field1", "value1").get();
- fail("cluster block should be thrown");
- } catch (ClusterBlockException e) {
- // all is well!
+ try (Releasable tribeNode = startTribeNode(ALL, additionalSettings)) {
+ // Creates 2 indices on each remote cluster, test1 and block_test1 on cluster1 and test2 and block_test2 on cluster2
+ assertAcked(cluster1.client().admin().indices().prepareCreate("test1"));
+ assertAcked(cluster1.client().admin().indices().prepareCreate("block_test1"));
+ ensureGreen(cluster1.client());
+
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("block_test2"));
+ ensureGreen(cluster2.client());
+
+ // Wait for the tribe node to connect to the two remote clusters
+ assertNodes(ALL);
+
+ // Wait for the tribe node to retrieve the indices into its cluster state
+ assertIndicesExist(client(), "test1", "test2", "block_test1", "block_test2");
+
+ // Writes allowed through the tribe node for test1/test2 indices
+ client().prepareIndex("test1", "type1").setSource("field", "value").get();
+ client().prepareIndex("test2", "type2").setSource("field", "value").get();
+
+ ClusterBlockException e;
+ e = expectThrows(ClusterBlockException.class, () -> client().prepareIndex("block_test1", "type1").setSource("foo", 0).get());
+ assertThat(e.getMessage(), containsString("blocked by: [FORBIDDEN/8/index write (api)]"));
+
+ e = expectThrows(ClusterBlockException.class, () -> client().prepareIndex("block_test2", "type2").setSource("foo", 0).get());
+ assertThat(e.getMessage(), containsString("blocked by: [FORBIDDEN/8/index write (api)]"));
}
}
public void testOnConflictDrop() throws Exception {
- logger.info("create 2 indices, test1 on t1, and test2 on t2");
- assertAcked(cluster().client().admin().indices().prepareCreate("conflict"));
- assertAcked(cluster2.client().admin().indices().prepareCreate("conflict"));
- assertAcked(cluster().client().admin().indices().prepareCreate("test1"));
- assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
-
- setupTribeNode(Settings.builder()
+ Settings additionalSettings = Settings.builder()
.put("tribe.on_conflict", "drop")
- .build());
+ .build();
- logger.info("wait till tribe has the same nodes as the 2 clusters");
- awaitSameNodeCounts();
+ try (Releasable tribeNode = startTribeNode(ALL, additionalSettings)) {
+ // Creates 2 indices on each remote cluster, test1 and conflict on cluster1 and test2 and also conflict on cluster2
+ assertAcked(cluster1.client().admin().indices().prepareCreate("test1"));
+ assertAcked(cluster1.client().admin().indices().prepareCreate("conflict"));
+ ensureGreen(cluster1.client());
- // wait till the tribe node connected to the cluster, by checking if the index exists in the cluster state
- logger.info("wait till test1 and test2 exists in the tribe node state");
- awaitIndicesInClusterState("test1", "test2");
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("conflict"));
+ ensureGreen(cluster2.client());
- assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test1").getSettings().get("tribe.name"), equalTo("t1"));
- assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test2").getSettings().get("tribe.name"), equalTo("t2"));
- assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().hasIndex("conflict"), equalTo(false));
- }
+ // Wait for the tribe node to connect to the two remote clusters
+ assertNodes(ALL);
- public void testOnConflictPrefer() throws Exception {
- testOnConflictPrefer(randomBoolean() ? "t1" : "t2");
- }
+ // Wait for the tribe node to retrieve the indices into its cluster state
+ assertIndicesExist(client(), "test1", "test2");
- private void testOnConflictPrefer(String tribe) throws Exception {
- logger.info("testing preference for tribe {}", tribe);
-
- logger.info("create 2 indices, test1 on t1, and test2 on t2");
- assertAcked(internalCluster().client().admin().indices().prepareCreate("conflict"));
- assertAcked(cluster2.client().admin().indices().prepareCreate("conflict"));
- assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
- assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
-
- setupTribeNode(Settings.builder()
- .put("tribe.on_conflict", "prefer_" + tribe)
- .build());
- logger.info("wait till tribe has the same nodes as the 2 clusters");
- awaitSameNodeCounts();
- // wait till the tribe node connected to the cluster, by checking if the index exists in the cluster state
- logger.info("wait till test1 and test2 exists in the tribe node state");
- awaitIndicesInClusterState("test1", "test2", "conflict");
-
- assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test1").getSettings().get("tribe.name"), equalTo("t1"));
- assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test2").getSettings().get("tribe.name"), equalTo("t2"));
- assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("conflict").getSettings().get("tribe.name"), equalTo(tribe));
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.getMetaData().hasIndex("test1"), is(true));
+ assertThat(clusterState.getMetaData().index("test1").getSettings().get("tribe.name"), equalTo(cluster1.getClusterName()));
+ assertThat(clusterState.getMetaData().hasIndex("test2"), is(true));
+ assertThat(clusterState.getMetaData().index("test2").getSettings().get("tribe.name"), equalTo(cluster2.getClusterName()));
+ assertThat(clusterState.getMetaData().hasIndex("conflict"), is(false));
+ }
}
- public void testTribeOnOneCluster() throws Exception {
- setupTribeNode(Settings.EMPTY);
- logger.info("create 2 indices, test1 on t1, and test2 on t2");
- assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
- assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
-
-
- // wait till the tribe node connected to the cluster, by checking if the index exists in the cluster state
- logger.info("wait till test1 and test2 exists in the tribe node state");
- awaitIndicesInClusterState("test1", "test2");
-
- logger.info("wait till tribe has the same nodes as the 2 clusters");
- awaitSameNodeCounts();
-
- assertThat(tribeClient.admin().cluster().prepareHealth().setWaitForGreenStatus().get().getStatus(), equalTo(ClusterHealthStatus.GREEN));
-
- logger.info("create 2 docs through the tribe node");
- tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").get();
- tribeClient.prepareIndex("test2", "type1", "1").setSource("field1", "value1").get();
- tribeClient.admin().indices().prepareRefresh().get();
-
- logger.info("verify they are there");
- assertHitCount(tribeClient.prepareSearch().setSize(0).get(), 2L);
- assertHitCount(tribeClient.prepareSearch().get(), 2L);
- assertBusy(new Runnable() {
- @Override
- public void run() {
- ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
- assertThat(tribeState.getMetaData().index("test1").mapping("type1"), notNullValue());
- assertThat(tribeState.getMetaData().index("test2").mapping("type1"), notNullValue());
- }
- });
+ public void testOnConflictPrefer() throws Exception {
+ final String preference = randomFrom(cluster1, cluster2).getClusterName();
+ Settings additionalSettings = Settings.builder()
+ .put("tribe.on_conflict", "prefer_" + preference)
+ .build();
+ try (Releasable tribeNode = startTribeNode(ALL, additionalSettings)) {
+ assertAcked(cluster1.client().admin().indices().prepareCreate("test1"));
+ assertAcked(cluster1.client().admin().indices().prepareCreate("shared"));
+ ensureGreen(cluster1.client());
- logger.info("write to another type");
- tribeClient.prepareIndex("test1", "type2", "1").setSource("field1", "value1").get();
- tribeClient.prepareIndex("test2", "type2", "1").setSource("field1", "value1").get();
- assertNoFailures(tribeClient.admin().indices().prepareRefresh().get());
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("shared"));
+ ensureGreen(cluster2.client());
+ // Wait for the tribe node to connect to the two remote clusters
+ assertNodes(ALL);
- logger.info("verify they are there");
- assertHitCount(tribeClient.prepareSearch().setSize(0).get(), 4L);
- assertHitCount(tribeClient.prepareSearch().get(), 4L);
- assertBusy(new Runnable() {
- @Override
- public void run() {
- ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
- assertThat(tribeState.getMetaData().index("test1").mapping("type1"), notNullValue());
- assertThat(tribeState.getMetaData().index("test1").mapping("type2"), notNullValue());
- assertThat(tribeState.getMetaData().index("test2").mapping("type1"), notNullValue());
- assertThat(tribeState.getMetaData().index("test2").mapping("type2"), notNullValue());
- }
- });
+ // Wait for the tribe node to retrieve the indices into its cluster state
+ assertIndicesExist(client(), "test1", "test2", "shared");
- logger.info("make sure master level write operations fail... (we don't really have a master)");
- try {
- tribeClient.admin().indices().prepareCreate("tribe_index").setMasterNodeTimeout("10ms").get();
- fail();
- } catch (MasterNotDiscoveredException e) {
- // all is well!
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.getMetaData().hasIndex("test1"), is(true));
+ assertThat(clusterState.getMetaData().index("test1").getSettings().get("tribe.name"), equalTo(cluster1.getClusterName()));
+ assertThat(clusterState.getMetaData().hasIndex("test2"), is(true));
+ assertThat(clusterState.getMetaData().index("test2").getSettings().get("tribe.name"), equalTo(cluster2.getClusterName()));
+ assertThat(clusterState.getMetaData().hasIndex("shared"), is(true));
+ assertThat(clusterState.getMetaData().index("shared").getSettings().get("tribe.name"), equalTo(preference));
}
+ }
- logger.info("delete an index, and make sure its reflected");
- cluster2.client().admin().indices().prepareDelete("test2").get();
- awaitIndicesNotInClusterState("test2");
-
- try {
- logger.info("stop a node, make sure its reflected");
- cluster2.stopRandomDataNode();
- awaitSameNodeCounts();
- } finally {
- cluster2.startNode();
- awaitSameNodeCounts();
+ public void testTribeOnOneCluster() throws Exception {
+ try (Releasable tribeNode = startTribeNode()) {
+ // Creates 2 indices, test1 on cluster1 and test2 on cluster2
+ assertAcked(cluster1.client().admin().indices().prepareCreate("test1"));
+ ensureGreen(cluster1.client());
+
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ ensureGreen(cluster2.client());
+
+ // Wait for the tribe node to connect to the two remote clusters
+ assertNodes(ALL);
+
+ // Wait for the tribe node to retrieve the indices into its cluster state
+ assertIndicesExist(client(), "test1", "test2");
+
+ // Creates two docs using the tribe node
+ indexRandom(true,
+ client().prepareIndex("test1", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test2", "type1", "1").setSource("field1", "value1")
+ );
+
+ // Verify that documents are searchable using the tribe node
+ assertHitCount(client().prepareSearch().get(), 2L);
+
+ // Using assertBusy to check that the mappings are in the tribe node cluster state
+ assertBusy(() -> {
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.getMetaData().index("test1").mapping("type1"), notNullValue());
+ assertThat(clusterState.getMetaData().index("test2").mapping("type1"), notNullValue());
+ });
+
+ // More documents with another type
+ indexRandom(true,
+ client().prepareIndex("test1", "type2", "1").setSource("field1", "value1"),
+ client().prepareIndex("test2", "type2", "1").setSource("field1", "value1")
+ );
+ assertHitCount(client().prepareSearch().get(), 4L);
+ assertBusy(() -> {
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.getMetaData().index("test1").mapping("type1"), notNullValue());
+ assertThat(clusterState.getMetaData().index("test1").mapping("type2"), notNullValue());
+
+ assertThat(clusterState.getMetaData().index("test2").mapping("type1"), notNullValue());
+ assertThat(clusterState.getMetaData().index("test2").mapping("type2"), notNullValue());
+ });
+
+ // Make sure master level write operations fail... (we don't really have a master)
+ expectThrows(MasterNotDiscoveredException.class, () -> {
+ client().admin().indices().prepareCreate("tribe_index").setMasterNodeTimeout("10ms").get();
+ });
+
+ // Now delete an index and make sure it's reflected in the cluster state
+ cluster2.client().admin().indices().prepareDelete("test2").get();
+ assertBusy(() -> {
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertFalse(clusterState.getMetaData().hasIndex("test2"));
+ assertFalse(clusterState.getRoutingTable().hasIndex("test2"));
+ });
}
}
public void testCloseAndOpenIndex() throws Exception {
- //create an index and close it even before starting the tribe node
- assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
- ensureGreen(internalCluster());
- assertAcked(internalCluster().client().admin().indices().prepareClose("test1"));
-
- setupTribeNode(Settings.EMPTY);
- awaitSameNodeCounts();
-
- //the closed index is not part of the tribe node cluster state
- ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
- assertThat(tribeState.getMetaData().hasIndex("test1"), equalTo(false));
-
- //open the index, it becomes part of the tribe node cluster state
- assertAcked(internalCluster().client().admin().indices().prepareOpen("test1"));
- awaitIndicesInClusterState("test1");
- ensureGreen(internalCluster());
-
- //create a second index, wait till it is seen from within the tribe node
- assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
- awaitIndicesInClusterState("test1", "test2");
- ensureGreen(cluster2);
-
- //close the second index, wait till it gets removed from the tribe node cluster state
- assertAcked(cluster2.client().admin().indices().prepareClose("test2"));
- awaitIndicesNotInClusterState("test2");
-
- //open the second index, wait till it gets added back to the tribe node cluster state
- assertAcked(cluster2.client().admin().indices().prepareOpen("test2"));
- awaitIndicesInClusterState("test1", "test2");
- ensureGreen(cluster2);
+ // Creates an index on remote cluster 1
+ assertTrue(cluster1.client().admin().indices().prepareCreate("first").get().isAcknowledged());
+ ensureGreen(cluster1.client());
+
+ // Closes the index
+ assertTrue(cluster1.client().admin().indices().prepareClose("first").get().isAcknowledged());
+
+ try (Releasable tribeNode = startTribeNode()) {
+ // Wait for the tribe node to connect to the two remote clusters
+ assertNodes(ALL);
+
+ // The closed index is not part of the tribe node cluster state
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertFalse(clusterState.getMetaData().hasIndex("first"));
+
+ // Open the index, it becomes part of the tribe node cluster state
+ assertTrue(cluster1.client().admin().indices().prepareOpen("first").get().isAcknowledged());
+ assertIndicesExist(client(), "first");
+
+ // Create a second index, wait till it is seen from within the tribe node
+ assertTrue(cluster2.client().admin().indices().prepareCreate("second").get().isAcknowledged());
+ assertIndicesExist(client(), "first", "second");
+ ensureGreen(cluster2.client());
+
+ // Close the second index, wait till it gets removed from the tribe node cluster state
+ assertTrue(cluster2.client().admin().indices().prepareClose("second").get().isAcknowledged());
+ assertIndicesExist(client(), "first");
+
+ // Open the second index, wait till it gets added back to the tribe node cluster state
+ assertTrue(cluster2.client().admin().indices().prepareOpen("second").get().isAcknowledged());
+ assertIndicesExist(client(), "first", "second");
+ ensureGreen(cluster2.client());
+ }
}
- private void awaitIndicesInClusterState(final String... indices) throws Exception {
- assertBusy(new Runnable() {
- @Override
- public void run() {
- ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
- for (String index : indices) {
- assertTrue(tribeState.getMetaData().hasIndex(index));
- assertTrue(tribeState.getRoutingTable().hasIndex(index));
- }
+ /**
+ * Test that the tribe node's cluster state correctly reflects the number of nodes
+ * of the remote clusters the tribe node is connected to.
+ */
+ public void testClusterStateNodes() throws Exception {
+ List<Predicate<InternalTestCluster>> predicates = Arrays.asList(NONE, CLUSTER1_ONLY, CLUSTER2_ONLY, ALL);
+ Collections.shuffle(predicates, random());
+
+ for (Predicate<InternalTestCluster> predicate : predicates) {
+ try (Releasable tribeNode = startTribeNode(predicate, Settings.EMPTY)) {
+ assertNodes(predicate);
}
- });
+ }
}
- private void awaitIndicesNotInClusterState(final String... indices) throws Exception {
- assertBusy(new Runnable() {
- @Override
- public void run() {
- ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
- for (String index : indices) {
- assertFalse(tribeState.getMetaData().hasIndex(index));
- assertFalse(tribeState.getRoutingTable().hasIndex(index));
- }
+ private void assertIndicesExist(Client client, String... indices) throws Exception {
+ assertBusy(() -> {
+ ClusterState state = client.admin().cluster().prepareState().setRoutingTable(true).setMetaData(true).get().getState();
+ assertThat(state.getMetaData().getIndices().size(), equalTo(indices.length));
+ for (String index : indices) {
+ assertTrue(state.getMetaData().hasIndex(index));
+ assertTrue(state.getRoutingTable().hasIndex(index));
}
});
}
- private void ensureGreen(TestCluster testCluster) {
- ClusterHealthResponse actionGet = testCluster.client().admin().cluster()
- .health(Requests.clusterHealthRequest().waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
- if (actionGet.isTimedOut()) {
- logger.info("ensureGreen timed out, cluster state:\n{}\n{}", testCluster.client().admin().cluster().prepareState().get().getState().prettyPrint(), testCluster.client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
- assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
- }
- assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ private void ensureGreen(Client client) throws Exception {
+ assertBusy(() -> {
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth()
+ .setWaitForActiveShards(0)
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNoRelocatingShards(true)
+ .get();
+ assertThat(clusterHealthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertFalse(clusterHealthResponse.isTimedOut());
+ });
}
- private void awaitSameNodeCounts() throws Exception {
- assertBusy(new Runnable() {
- @Override
- public void run() {
- DiscoveryNodes tribeNodes = tribeNode.client().admin().cluster().prepareState().get().getState().getNodes();
- assertThat(countDataNodesForTribe("t1", tribeNodes), equalTo(internalCluster().client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size()));
- assertThat(countDataNodesForTribe("t2", tribeNodes), equalTo(cluster2.client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size()));
+ private static void assertNodes(Predicate<InternalTestCluster> filter) throws Exception {
+ final Set<String> expectedNodes = Sets.newHashSet(internalCluster().getNodeNames());
+ doWithAllClusters(filter, c -> {
+ // Adds the tribe client node dedicated to this remote cluster
+ for (String tribeNode : internalCluster().getNodeNames()) {
+ expectedNodes.add(tribeNode + "/" + c.getClusterName());
}
+ // Adds the remote cluster's node names
+ Collections.addAll(expectedNodes, c.getNodeNames());
+ });
+
+ assertBusy(() -> {
+ ClusterState state = client().admin().cluster().prepareState().setNodes(true).get().getState();
+ Set<String> nodes = StreamSupport.stream(state.getNodes().spliterator(), false).map(DiscoveryNode::getName).collect(toSet());
+ assertThat(nodes.containsAll(expectedNodes), is(true));
});
}
- private int countDataNodesForTribe(String tribeName, DiscoveryNodes nodes) {
- int count = 0;
- for (DiscoveryNode node : nodes) {
- if (!node.isDataNode()) {
- continue;
- }
- if (tribeName.equals(node.getAttributes().get("tribe.name"))) {
- count++;
- }
- }
- return count;
+ private static void doWithAllClusters(Consumer<InternalTestCluster> consumer) {
+ doWithAllClusters(cluster -> cluster != null, consumer);
}
- public String[] getUnicastHosts(Client client) {
- ArrayList<String> unicastHosts = new ArrayList<>();
- NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setTransport(true).get();
- for (NodeInfo info : nodeInfos.getNodes()) {
- TransportAddress address = info.getTransport().getAddress().publishAddress();
- unicastHosts.add(address.getAddress() + ":" + address.getPort());
- }
- return unicastHosts.toArray(new String[unicastHosts.size()]);
+ private static void doWithAllClusters(Predicate<InternalTestCluster> predicate, Consumer<InternalTestCluster> consumer) {
+ Stream.of(cluster1, cluster2).filter(predicate).forEach(consumer);
}
}
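
A note on the node bookkeeping in the rewritten assertNodes above: a tribe node spawns one dedicated client node per remote cluster it connects to, named "<tribe node>/<cluster name>", so the expected set is the tribe node names, those per-cluster client names, and every remote node name. The following standalone Java sketch reproduces that computation; the class and the sample names are illustrative, not part of the commit.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ExpectedTribeNodes {
    // Mirrors assertNodes: the tribe nodes themselves, one "<tribe>/<cluster>"
    // client node per connected remote cluster, and all remote node names.
    static Set<String> expectedNodes(List<String> tribeNodes, List<String> clusterNames,
                                     List<List<String>> clusterNodes) {
        Set<String> expected = new HashSet<>(tribeNodes);
        for (int i = 0; i < clusterNames.size(); i++) {
            for (String tribeNode : tribeNodes) {
                expected.add(tribeNode + "/" + clusterNames.get(i));
            }
            expected.addAll(clusterNodes.get(i));
        }
        return expected;
    }

    public static void main(String[] args) {
        // Prints the six names a single tribe node connected to both clusters would wait for.
        System.out.println(expectedNodes(
                List.of("tribe_node"),
                List.of("cluster1", "cluster2"),
                List.of(List.of("c1_node0"), List.of("c2_node0", "c2_node1"))));
    }
}
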
diff --git a/core/src/test/java/org/elasticsearch/ttl/SimpleTTLIT.java b/core/src/test/java/org/elasticsearch/ttl/SimpleTTLIT.java
index 5e614d244c..5716e57c96 100644
--- a/core/src/test/java/org/elasticsearch/ttl/SimpleTTLIT.java
+++ b/core/src/test/java/org/elasticsearch/ttl/SimpleTTLIT.java
@@ -117,7 +117,7 @@ public class SimpleTTLIT extends ESIntegTestCase {
// realtime get check
long currentTime = System.currentTimeMillis();
- GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").get();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").get();
long ttl0;
if (getResponse.isExists()) {
ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
@@ -127,7 +127,7 @@ public class SimpleTTLIT extends ESIntegTestCase {
}
// verify the ttl is still decreasing when going to the replica
currentTime = System.currentTimeMillis();
- getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").get();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").get();
if (getResponse.isExists()) {
ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
@@ -136,7 +136,7 @@ public class SimpleTTLIT extends ESIntegTestCase {
}
// non realtime get (stored)
currentTime = System.currentTimeMillis();
- getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).get();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").setRealtime(false).get();
if (getResponse.isExists()) {
ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
@@ -145,7 +145,7 @@ public class SimpleTTLIT extends ESIntegTestCase {
}
// non realtime get going the replica
currentTime = System.currentTimeMillis();
- getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).get();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").setRealtime(false).get();
if (getResponse.isExists()) {
ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
@@ -154,10 +154,10 @@ public class SimpleTTLIT extends ESIntegTestCase {
}
// no TTL provided so no TTL fetched
- getResponse = client().prepareGet("test", "type1", "no_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "no_ttl").setStoredFields("_ttl").setRealtime(true).execute().actionGet();
assertThat(getResponse.getField("_ttl"), nullValue());
// no TTL provided make sure it has default TTL
- getResponse = client().prepareGet("test", "type2", "default_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ getResponse = client().prepareGet("test", "type2", "default_ttl").setStoredFields("_ttl").setRealtime(true).execute().actionGet();
ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
assertThat(ttl0, greaterThan(0L));
@@ -190,28 +190,28 @@ public class SimpleTTLIT extends ESIntegTestCase {
));
// realtime get check
- getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").setRealtime(true).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
- getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setStoredFields("_ttl").setRealtime(true).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
// replica realtime get check
- getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").setRealtime(true).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
- getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setStoredFields("_ttl").setRealtime(true).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
// Need to run a refresh, in order for the non realtime get to work.
client().admin().indices().prepareRefresh("test").execute().actionGet();
// non realtime get (stored) check
- getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").setRealtime(false).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
- getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setStoredFields("_ttl").setRealtime(false).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
// non realtime get going the replica check
- getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "1").setStoredFields("_ttl").setRealtime(false).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
- getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setStoredFields("_ttl").setRealtime(false).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(false));
}
@@ -287,7 +287,7 @@ public class SimpleTTLIT extends ESIntegTestCase {
}
private long getTtl(String type, Object id) {
- GetResponse getResponse = client().prepareGet("test", type, id.toString()).setFields("_ttl").setRealtime(true).execute()
+ GetResponse getResponse = client().prepareGet("test", type, id.toString()).setStoredFields("_ttl").execute()
.actionGet();
return ((Number) getResponse.getField("_ttl").getValue()).longValue();
}
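
The mechanical change running through this file is the rename of the get API's field selector: setFields is replaced by setStoredFields when fetching stored fields such as _ttl. A minimal hedged sketch of the new shape, assuming a connected Client named client and the test's index/type/id (it needs a running cluster to execute):

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;

public class StoredFieldsSketch {
    // Was: client.prepareGet("test", "type1", "1").setFields("_ttl").get();
    static long fetchTtl(Client client) {
        GetResponse response = client.prepareGet("test", "type1", "1")
                .setStoredFields("_ttl") // replacement for the removed setFields
                .setRealtime(true)
                .get();
        return ((Number) response.getField("_ttl").getValue()).longValue();
    }
}
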
diff --git a/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java b/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java
index e81b4decb2..ac142fa461 100644
--- a/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java
+++ b/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java
@@ -123,12 +123,12 @@ public class TimestampTTLBWIT extends ESIntegTestCase {
// check TTL is kept after an update without TTL
client().prepareIndex("test", "type1", "2").setSource("field", 1).setTTL(86400000L).setRefreshPolicy(IMMEDIATE).get();
- GetResponse getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ GetResponse getResponse = client().prepareGet("test", "type1", "2").setStoredFields("_ttl").execute().actionGet();
long ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
assertThat(ttl, greaterThan(0L));
client().prepareUpdate(indexOrAlias(), "type1", "2")
.setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet();
- getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setStoredFields("_ttl").execute().actionGet();
ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
assertThat(ttl, greaterThan(0L));
@@ -136,7 +136,7 @@ public class TimestampTTLBWIT extends ESIntegTestCase {
client().prepareUpdate(indexOrAlias(), "type1", "2")
.setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values",
Collections.singletonMap("_ctx", Collections.singletonMap("_ttl", 3600000)))).execute().actionGet();
- getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setStoredFields("_ttl").execute().actionGet();
ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
assertThat(ttl, greaterThan(0L));
assertThat(ttl, lessThanOrEqualTo(3600000L));
@@ -147,7 +147,7 @@ public class TimestampTTLBWIT extends ESIntegTestCase {
.setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values",
Collections.singletonMap("_ctx", Collections.singletonMap("_timestamp", "2009-11-15T14:12:12")))).execute()
.actionGet();
- getResponse = client().prepareGet("test", "type1", "3").setFields("_timestamp").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "3").setStoredFields("_timestamp").execute().actionGet();
long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
assertThat(timestamp, equalTo(1258294332000L));
}
diff --git a/core/src/test/java/org/elasticsearch/update/UpdateIT.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java
index a71bd466ad..fc360effb0 100644
--- a/core/src/test/java/org/elasticsearch/update/UpdateIT.java
+++ b/core/src/test/java/org/elasticsearch/update/UpdateIT.java
@@ -469,7 +469,7 @@ public class UpdateIT extends ESIntegTestCase {
UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
.setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
.setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo")))
- .setFields("_source")
+ .setFetchSource(true)
.execute().actionGet();
assertThat(updateResponse.getIndex(), equalTo("test"));
@@ -527,15 +527,9 @@ public class UpdateIT extends ESIntegTestCase {
.setVersionType(VersionType.EXTERNAL).execute(),
ActionRequestValidationException.class);
-
- // With force version
- client().prepareUpdate(indexOrAlias(), "type", "2")
- .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v10")))
- .setVersion(10).setVersionType(VersionType.FORCE).get();
-
GetResponse get = get("test", "type", "2");
assertThat(get.getVersion(), equalTo(10L));
- assertThat((String) get.getSource().get("text"), equalTo("v10"));
+ assertThat((String) get.getSource().get("text"), equalTo("value"));
// upserts - the combination with versions is a bit weird. Test are here to ensure we do not change our behavior unintentionally
@@ -555,7 +549,7 @@ public class UpdateIT extends ESIntegTestCase {
UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
.setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
.setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo")))
- .setFields("_source")
+ .setFetchSource(true)
.execute().actionGet();
assertThat(updateResponse.getIndex(), equalTo("test"));
@@ -630,14 +624,30 @@ public class UpdateIT extends ESIntegTestCase {
// check fields parameter
client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
- .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).setFields("_source", "field")
- .execute().actionGet();
+ .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null))
+ .setFields("field")
+ .setFetchSource(true)
+ .execute().actionGet();
assertThat(updateResponse.getIndex(), equalTo("test"));
assertThat(updateResponse.getGetResult(), notNullValue());
assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
assertThat(updateResponse.getGetResult().sourceRef(), notNullValue());
assertThat(updateResponse.getGetResult().field("field").getValue(), notNullValue());
+ // check _source parameter
+ client().prepareIndex("test", "type1", "1").setSource("field1", 1, "field2", 2).execute().actionGet();
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("field1", ScriptService.ScriptType.INLINE, "field_inc", null))
+ .setFetchSource("field1", "field2")
+ .get();
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult().sourceRef(), notNullValue());
+ assertThat(updateResponse.getGetResult().field("field1"), nullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().size(), equalTo(1));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("field1"), equalTo(2));
+
// check updates without script
// add new field
client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java
index e43991efcc..417defee5f 100644
--- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java
+++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java
@@ -30,7 +30,6 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.test.ESIntegTestCase;
@@ -73,36 +72,6 @@ public class SimpleVersioningIT extends ESIntegTestCase {
assertThat(indexResponse.getVersion(), equalTo(18L));
}
- public void testForce() throws Exception {
- createIndex("test");
- ensureGreen("test"); // we are testing force here which doesn't work if we are recovering at the same time - zzzzz...
- IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(12).setVersionType(VersionType.FORCE).get();
- assertThat(indexResponse.getVersion(), equalTo(12L));
-
- indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(12).setVersionType(VersionType.FORCE).get();
- assertThat(indexResponse.getVersion(), equalTo(12L));
-
- indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(14).setVersionType(VersionType.FORCE).get();
- assertThat(indexResponse.getVersion(), equalTo(14L));
-
- indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.FORCE).get();
- assertThat(indexResponse.getVersion(), equalTo(13L));
-
- client().admin().indices().prepareRefresh().execute().actionGet();
- if (randomBoolean()) {
- refresh();
- }
- for (int i = 0; i < 10; i++) {
- assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(13L));
- }
-
- // deleting with a lower version works.
- long v = randomIntBetween(12, 14);
- DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(v).setVersionType(VersionType.FORCE).get();
- assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
- assertThat(deleteResponse.getVersion(), equalTo(v));
- }
-
public void testExternalGTE() throws Exception {
createIndex("test");
@@ -648,11 +617,7 @@ public class SimpleVersioningIT extends ESIntegTestCase {
}
if (threadRandom.nextInt(100) == 7) {
logger.trace("--> {}: TEST: now flush at {}", threadID, System.nanoTime() - startTime);
- try {
- flush();
- } catch (FlushNotAllowedEngineException fnaee) {
- // OK
- }
+ flush();
logger.trace("--> {}: TEST: flush done at {}", threadID, System.nanoTime() - startTime);
}
}
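
With testForce deleted above (VersionType.FORCE is being removed), caller-supplied versions go through external versioning, where a write succeeds only if its version is greater than the stored one (or greater-or-equal with EXTERNAL_GTE, which testExternalGTE still covers). A hedged sketch, assuming a connected Client named client:

import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.VersionType;

public class ExternalVersioningSketch {
    static void demo(Client client) {
        // Succeeds: the document does not exist yet, so version 12 is accepted as-is.
        IndexResponse response = client.prepareIndex("test", "type", "1")
                .setSource("field1", "value1")
                .setVersion(12)
                .setVersionType(VersionType.EXTERNAL)
                .get();
        assert response.getVersion() == 12L;
        // Unlike the removed FORCE semantics, re-indexing with version 12 or lower
        // now fails with a VersionConflictEngineException instead of overwriting.
    }
}
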
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip b/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip
index 8568484235..ca3d11099c 100644
--- a/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip
+++ b/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip b/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip
index 394bbef103..47496a9f01 100644
--- a/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip
+++ b/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip b/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip
index a5e30e1d52..3b45995941 100644
--- a/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip
+++ b/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0.zip b/core/src/test/resources/indices/bwc/index-2.0.0.zip
index 58a05e6617..2dae323f69 100644
--- a/core/src/test/resources/indices/bwc/index-2.0.0.zip
+++ b/core/src/test/resources/indices/bwc/index-2.0.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.1.zip b/core/src/test/resources/indices/bwc/index-2.0.1.zip
index eb5c3033b8..2d0d5f42d5 100644
--- a/core/src/test/resources/indices/bwc/index-2.0.1.zip
+++ b/core/src/test/resources/indices/bwc/index-2.0.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.2.zip b/core/src/test/resources/indices/bwc/index-2.0.2.zip
index 7394c8fd83..f6a9492b33 100644
--- a/core/src/test/resources/indices/bwc/index-2.0.2.zip
+++ b/core/src/test/resources/indices/bwc/index-2.0.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.1.0.zip b/core/src/test/resources/indices/bwc/index-2.1.0.zip
index ebf72f047c..347d9cb31e 100644
--- a/core/src/test/resources/indices/bwc/index-2.1.0.zip
+++ b/core/src/test/resources/indices/bwc/index-2.1.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.1.1.zip b/core/src/test/resources/indices/bwc/index-2.1.1.zip
index a98ffb4cb4..6981c9af4a 100644
--- a/core/src/test/resources/indices/bwc/index-2.1.1.zip
+++ b/core/src/test/resources/indices/bwc/index-2.1.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.1.2.zip b/core/src/test/resources/indices/bwc/index-2.1.2.zip
index 8488fb2a2b..57162675b1 100644
--- a/core/src/test/resources/indices/bwc/index-2.1.2.zip
+++ b/core/src/test/resources/indices/bwc/index-2.1.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.2.0.zip b/core/src/test/resources/indices/bwc/index-2.2.0.zip
index 797ca24f4e..81ff74d5ab 100644
--- a/core/src/test/resources/indices/bwc/index-2.2.0.zip
+++ b/core/src/test/resources/indices/bwc/index-2.2.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.2.1.zip b/core/src/test/resources/indices/bwc/index-2.2.1.zip
index 8d8e55ae62..7e640e4158 100644
--- a/core/src/test/resources/indices/bwc/index-2.2.1.zip
+++ b/core/src/test/resources/indices/bwc/index-2.2.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.2.2.zip b/core/src/test/resources/indices/bwc/index-2.2.2.zip
index 08b1270863..f6c5c7653d 100644
--- a/core/src/test/resources/indices/bwc/index-2.2.2.zip
+++ b/core/src/test/resources/indices/bwc/index-2.2.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.0.zip b/core/src/test/resources/indices/bwc/index-2.3.0.zip
index 9e11bd8493..c09e5d8ba1 100644
--- a/core/src/test/resources/indices/bwc/index-2.3.0.zip
+++ b/core/src/test/resources/indices/bwc/index-2.3.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.1.zip b/core/src/test/resources/indices/bwc/index-2.3.1.zip
index dfe632d522..de10f7926d 100644
--- a/core/src/test/resources/indices/bwc/index-2.3.1.zip
+++ b/core/src/test/resources/indices/bwc/index-2.3.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.2.zip b/core/src/test/resources/indices/bwc/index-2.3.2.zip
index 1457cf3499..eff6c8cd15 100644
--- a/core/src/test/resources/indices/bwc/index-2.3.2.zip
+++ b/core/src/test/resources/indices/bwc/index-2.3.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.3.zip b/core/src/test/resources/indices/bwc/index-2.3.3.zip
index aced41714f..751819741b 100644
--- a/core/src/test/resources/indices/bwc/index-2.3.3.zip
+++ b/core/src/test/resources/indices/bwc/index-2.3.3.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.4.zip b/core/src/test/resources/indices/bwc/index-2.3.4.zip
index 2d8514724b..b69f100398 100644
--- a/core/src/test/resources/indices/bwc/index-2.3.4.zip
+++ b/core/src/test/resources/indices/bwc/index-2.3.4.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.5.zip b/core/src/test/resources/indices/bwc/index-2.3.5.zip
index ce8319ef0e..dd64e69995 100644
--- a/core/src/test/resources/indices/bwc/index-2.3.5.zip
+++ b/core/src/test/resources/indices/bwc/index-2.3.5.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-2.4.0.zip b/core/src/test/resources/indices/bwc/index-2.4.0.zip
new file mode 100644
index 0000000000..14bd436b16
--- /dev/null
+++ b/core/src/test/resources/indices/bwc/index-2.4.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip b/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip
index 707c7b9da4..4a46dbc838 100644
--- a/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip b/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip
index d01c151d79..6e4080a914 100644
--- a/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip b/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip
index b66a72975a..deb36fee11 100644
--- a/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0.zip b/core/src/test/resources/indices/bwc/repo-2.0.0.zip
index 6de1513fe5..8042696cb9 100644
--- a/core/src/test/resources/indices/bwc/repo-2.0.0.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.0.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.1.zip b/core/src/test/resources/indices/bwc/repo-2.0.1.zip
index e9d9cc8a70..6e9b3d0aed 100644
--- a/core/src/test/resources/indices/bwc/repo-2.0.1.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.0.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.2.zip b/core/src/test/resources/indices/bwc/repo-2.0.2.zip
index 08888ff3ab..4dd61b0f26 100644
--- a/core/src/test/resources/indices/bwc/repo-2.0.2.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.0.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.1.0.zip b/core/src/test/resources/indices/bwc/repo-2.1.0.zip
index 3b4bf2718e..b641e0b5bb 100644
--- a/core/src/test/resources/indices/bwc/repo-2.1.0.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.1.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.1.1.zip b/core/src/test/resources/indices/bwc/repo-2.1.1.zip
index 76ac5b9645..e08cde10b3 100644
--- a/core/src/test/resources/indices/bwc/repo-2.1.1.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.1.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.1.2.zip b/core/src/test/resources/indices/bwc/repo-2.1.2.zip
index 460a69c69c..f9829c219f 100644
--- a/core/src/test/resources/indices/bwc/repo-2.1.2.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.1.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.2.0.zip b/core/src/test/resources/indices/bwc/repo-2.2.0.zip
index f2208b734c..703184dac1 100644
--- a/core/src/test/resources/indices/bwc/repo-2.2.0.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.2.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.2.1.zip b/core/src/test/resources/indices/bwc/repo-2.2.1.zip
index 35f7425b20..c665f79c11 100644
--- a/core/src/test/resources/indices/bwc/repo-2.2.1.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.2.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.2.2.zip b/core/src/test/resources/indices/bwc/repo-2.2.2.zip
index 4b1593326a..9e5e6fdd30 100644
--- a/core/src/test/resources/indices/bwc/repo-2.2.2.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.2.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.0.zip b/core/src/test/resources/indices/bwc/repo-2.3.0.zip
index cdc5fe171d..f41df41224 100644
--- a/core/src/test/resources/indices/bwc/repo-2.3.0.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.3.0.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.1.zip b/core/src/test/resources/indices/bwc/repo-2.3.1.zip
index 7dc79f86f2..78e736986a 100644
--- a/core/src/test/resources/indices/bwc/repo-2.3.1.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.3.1.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.2.zip b/core/src/test/resources/indices/bwc/repo-2.3.2.zip
index f86f827485..b160856326 100644
--- a/core/src/test/resources/indices/bwc/repo-2.3.2.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.3.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.3.zip b/core/src/test/resources/indices/bwc/repo-2.3.3.zip
index b94020ee80..411cbea5a2 100644
--- a/core/src/test/resources/indices/bwc/repo-2.3.3.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.3.3.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.4.zip b/core/src/test/resources/indices/bwc/repo-2.3.4.zip
index ddd92319d1..4afa60f7c7 100644
--- a/core/src/test/resources/indices/bwc/repo-2.3.4.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.3.4.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.5.zip b/core/src/test/resources/indices/bwc/repo-2.3.5.zip
index 73b27dc83c..5d2d00de96 100644
--- a/core/src/test/resources/indices/bwc/repo-2.3.5.zip
+++ b/core/src/test/resources/indices/bwc/repo-2.3.5.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.4.0.zip b/core/src/test/resources/indices/bwc/repo-2.4.0.zip
new file mode 100644
index 0000000000..c5f3c0d075
--- /dev/null
+++ b/core/src/test/resources/indices/bwc/repo-2.4.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml
deleted file mode 100644
index 548b186e46..0000000000
--- a/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# you can override using a command-line parameter
-# -E logger.level=(ERROR|WARN|INFO|DEBUG|TRACE)
-logger.level: INFO
-rootLogger: ${logger.level}, console
-logger:
- test: TRACE, console
-
-appender:
- console:
- type: console
- layout:
- type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/config/test2/logging.yml
deleted file mode 100644
index 71fbce639a..0000000000
--- a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/logging.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-logger:
- # log action execution errors for easier debugging
- second: DEBUG, console2
-
-appender:
- console2:
- type: console
- layout:
- type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/test3/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/config/test2/test3/logging.yml
deleted file mode 100644
index edfe0c9ed4..0000000000
--- a/core/src/test/resources/org/elasticsearch/common/logging/config/test2/test3/logging.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-logger:
- # log action execution errors for easier debugging
- third: DEBUG, console3
-
-appender:
- console3:
- type: console
- layout:
- type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" \ No newline at end of file
diff --git a/dev-tools/create_bwc_index.py b/dev-tools/create_bwc_index.py
index 80d68a6e25..5a262d23cb 100644
--- a/dev-tools/create_bwc_index.py
+++ b/dev-tools/create_bwc_index.py
@@ -58,19 +58,32 @@ def assert_sort(hits):
# Indexes the given number of document into the given index
# and randomly runs refresh, optimize and flush commands
-def index_documents(es, index_name, type, num_docs):
+def index_documents(es, index_name, type, num_docs, supports_dots_in_field_names):
logging.info('Indexing %s docs' % num_docs)
+ index(es, index_name, type, num_docs, supports_dots_in_field_names, True)
+ logging.info('Flushing index')
+ es.indices.flush(index=index_name)
+
+def index(es, index_name, type, num_docs, supports_dots_in_field_names, flush=False):
for id in range(0, num_docs):
- es.index(index=index_name, doc_type=type, id=id, body={'string': str(random.randint(0, 100)),
- 'long_sort': random.randint(0, 100),
- 'double_sort' : float(random.randint(0, 100)),
- 'bool' : random.choice([True, False])})
+ body = {'string': str(random.randint(0, 100)),
+ 'long_sort': random.randint(0, 100),
+ 'double_sort' : float(random.randint(0, 100)),
+ 'bool' : random.choice([True, False])}
+ if supports_dots_in_field_names:
+ body['field.with.dots'] = str(random.randint(0, 100))
+
+ es.index(index=index_name, doc_type=type, id=id, body=body)
+
if rarely():
es.indices.refresh(index=index_name)
- if rarely():
+ if rarely() and flush:
es.indices.flush(index=index_name, force=frequently())
- logging.info('Flushing index')
- es.indices.flush(index=index_name)
+
+def reindex_docs(es, index_name, type, num_docs, supports_dots_in_field_names):
+ logging.info('Re-indexing %s docs' % num_docs)
+ # reindex some docs after the flush such that we have something in the translog
+ index(es, index_name, type, num_docs, supports_dots_in_field_names)
def delete_by_query(es, version, index_name, doc_type):
@@ -149,7 +162,8 @@ def start_node(version, release_dir, data_dir, repo_dir, tcp_port=DEFAULT_TRANSP
]
if version.startswith('0.') or version.startswith('1.0.0.Beta') :
cmd.append('-f') # version before 1.0 start in background automatically
- return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ env=dict(os.environ, ES_JAVA_OPTS='-Dmapper.allow_dots_in_name=true'))
def install_plugin(version, release_dir, plugin_name):
run_plugin(version, release_dir, 'install', [plugin_name])
@@ -248,6 +262,16 @@ def generate_index(client, version, index_name):
'auto_boost': True
}
}
+ mappings['doc'] = {'properties' : {}}
+ supports_dots_in_field_names = parse_version(version) >= parse_version("2.4.0")
+ if supports_dots_in_field_names:
+ mappings["doc"]['properties'].update({
+ 'field.with.dots': {
+ 'type': 'string',
+ 'boost': 4
+ }
+ })
+
if parse_version(version) < parse_version("5.0.0-alpha1"):
mappings['norms'] = {
'properties': {
@@ -291,14 +315,12 @@ def generate_index(client, version, index_name):
}
}
}
- mappings['doc'] = {
- 'properties': {
+ mappings['doc']['properties'].update({
'string': {
'type': 'text',
'boost': 4
}
- }
- }
+ })
settings = {
'number_of_shards': 1,
@@ -326,9 +348,10 @@ def generate_index(client, version, index_name):
# lighter index for it to keep bw tests reasonable
# see https://github.com/elastic/elasticsearch/issues/5817
num_docs = int(num_docs / 10)
- index_documents(client, index_name, 'doc', num_docs)
+ index_documents(client, index_name, 'doc', num_docs, supports_dots_in_field_names)
logging.info('Running basic asserts on the data added')
run_basic_asserts(client, index_name, 'doc', num_docs)
+ return num_docs, supports_dots_in_field_names
def snapshot_index(client, version, repo_dir):
persistent = {
@@ -438,7 +461,7 @@ def create_bwc_index(cfg, version):
node = start_node(version, release_dir, data_dir, repo_dir, cfg.tcp_port, cfg.http_port)
client = create_client(cfg.http_port)
index_name = 'index-%s' % version.lower()
- generate_index(client, version, index_name)
+ num_docs, supports_dots_in_field_names = generate_index(client, version, index_name)
if snapshot_supported:
snapshot_index(client, version, repo_dir)
@@ -447,6 +470,7 @@ def create_bwc_index(cfg, version):
# will already have the deletions applied on upgrade.
if version.startswith('0.') or version.startswith('1.'):
delete_by_query(client, version, index_name, 'doc')
+ reindex_docs(client, index_name, 'doc', min(100, num_docs), supports_dots_in_field_names)
shutdown_node(node)
node = None
@@ -464,7 +488,7 @@ def create_bwc_index(cfg, version):
def shutdown_node(node):
logging.info('Shutting down node with pid %d', node.pid)
- node.terminate()
+ node.kill() # don't use terminate otherwise we flush the translog
node.wait()
def parse_version(version):
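
The shutdown_node change above switches from terminate to kill so the node cannot flush its translog on the way out, leaving translog operations in the generated backwards-compatibility index. The same distinction in Java process terms, as an illustrative sketch (the command line is a placeholder):

public class HardKillSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder command; a real test would launch an actual distribution.
        Process node = new ProcessBuilder("bin/elasticsearch").inheritIO().start();
        Thread.sleep(10_000); // give the node time to index something

        // destroy() requests a graceful shutdown, which flushes the translog;
        // destroyForcibly() is the hard kill the script now uses instead.
        node.destroyForcibly();
        node.waitFor();
    }
}
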
diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py
index 33abbf9634..420ef35f36 100644
--- a/dev-tools/smoke_test_rc.py
+++ b/dev-tools/smoke_test_rc.py
@@ -65,9 +65,11 @@ DEFAULT_PLUGINS = ["analysis-icu",
"analysis-stempel",
"discovery-azure-classic",
"discovery-ec2",
+ "discovery-file",
"discovery-gce",
"ingest-attachment",
"ingest-geoip",
+ "ingest-user-agent",
"lang-javascript",
"lang-python",
"mapper-attachments",
@@ -131,6 +133,13 @@ def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS)
try:
downloaded_files = []
print(' ' + '*' * 80)
+ # here we create a temp gpg home and download the release key into it as the only key;
+ # verifying the signature will then fail if the signing key is not in that keystore, and
+ # the executing host stays unmodified since we don't have to import the key into the default keystore
+ gpg_home_dir = os.path.join(tmp_dir, "gpg_home_dir")
+ os.makedirs(gpg_home_dir, 0o700)
+ run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
+
for file in files:
name = os.path.basename(file)
print(' Smoketest file: %s' % name)
@@ -139,7 +148,6 @@ def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS)
artifact_path = os.path.join(tmp_dir, file)
downloaded_files.append(artifact_path)
current_artifact_dir = os.path.dirname(artifact_path)
- os.makedirs(current_artifact_dir)
urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
sha1_url = ''.join([url, '.sha1'])
checksum_file = artifact_path + ".sha1"
@@ -155,12 +163,6 @@ def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS)
print(' Downloading %s' % (gpg_url))
urllib.request.urlretrieve(gpg_url, gpg_file)
print(' Verifying gpg signature %s' % (gpg_file))
- # here we create a temp gpg home where we download the release key as the only key into
- # when we verify the signature it will fail if the signed key is not in the keystore and that
- # way we keep the executing host unmodified since we don't have to import the key into the default keystore
- gpg_home_dir = os.path.join(current_artifact_dir, "gpg_home_dir")
- os.makedirs(gpg_home_dir, 0o700)
- run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
print(' ' + '*' * 80)
print()
@@ -172,7 +174,7 @@ def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS)
def get_host_from_ports_file(es_dir):
return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]
-def smoke_test_release(release, files, expected_hash, plugins):
+def smoke_test_release(release, files, hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
@@ -191,7 +193,7 @@ def smoke_test_release(release, files, expected_hash, plugins):
plugin_names = {}
for plugin in plugins:
print(' Install plugin [%s]' % (plugin))
- run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), expected_hash, es_plugin_path, 'install -b', plugin))
+ run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), hash, es_plugin_path, 'install -b', plugin))
plugin_names[plugin] = True
if 'x-pack' in plugin_names:
headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
@@ -222,8 +224,6 @@ def smoke_test_release(release, files, expected_hash, plugins):
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
- if expected_hash != version['build_hash'].strip():
- raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true', headers=headers)
res = conn.getresponse()
@@ -262,7 +262,7 @@ if __name__ == "__main__":
parser.add_argument('--version', '-v', dest='version', default=None,
help='The Elasticsearch Version to smoke-tests', required=True)
parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
- help='The sha1 short hash of the git commit to smoketest')
+ help='The hash of the unified release')
parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
help='A list of additional plugins to smoketest')
parser.add_argument('--fetch_url', '-u', dest='url', default=None,
@@ -277,16 +277,16 @@ if __name__ == "__main__":
hash = args.hash
url = args.url
files = [ x % {'version': version} for x in [
- 'org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz',
- 'org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip',
- 'org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb',
- 'org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm'
+ 'elasticsearch-%(version)s.tar.gz',
+ 'elasticsearch-%(version)s.zip',
+ 'elasticsearch-%(version)s.deb',
+ 'elasticsearch-%(version)s.rpm'
]]
verify_java_version('1.8')
if url:
download_url = url
else:
- download_url = '%s/%s-%s' % ('http://download.elasticsearch.org/elasticsearch/staging', version, hash)
+ download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
diff --git a/distribution/build.gradle b/distribution/build.gradle
index bfbf96b5d2..fe172620b5 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -132,13 +132,6 @@ subprojects {
// note: the group must be correct before applying the nexus plugin, or it will capture the wrong value...
project.group = "org.elasticsearch.distribution.${project.name}"
project.archivesBaseName = 'elasticsearch'
- apply plugin: 'com.bmuschko.nexus'
- // we must create our own install task, because it is only added when the java plugin is added
- task install(type: Upload, description: "Installs the 'archives' artifacts into the local Maven repository.", group: 'Upload') {
- configuration = configurations.archives
- MavenRepositoryHandlerConvention repositoriesHandler = (MavenRepositoryHandlerConvention)getRepositories().getConvention().getPlugin(MavenRepositoryHandlerConvention)
- repositoriesHandler.mavenInstaller()
- }
// TODO: the map needs to be an input of the tasks, so that when it changes, the task will re-run...
/*****************************************************************************
@@ -191,16 +184,11 @@ subprojects {
/*****************************************************************************
* Publishing setup *
*****************************************************************************/
- BuildPlugin.configurePomGeneration(project)
- apply plugin: 'nebula.info-scm'
- apply plugin: 'nebula.maven-base-publish'
- apply plugin: 'nebula.maven-scm'
- publishing {
- publications {
- nebula {
- artifactId 'elasticsearch'
- }
- }
+ if (['zip', 'integ-test-zip'].contains(it.name)) {
+ BuildPlugin.configurePomGeneration(project)
+ apply plugin: 'nebula.info-scm'
+ apply plugin: 'nebula.maven-base-publish'
+ apply plugin: 'nebula.maven-scm'
}
}
@@ -295,7 +283,7 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) {
that'll happen when createEtc runs. */
outputs.file "${packagingFiles}/etc/elasticsearch/elasticsearch.yml"
outputs.file "${packagingFiles}/etc/elasticsearch/jvm.options"
- outputs.file "${packagingFiles}/etc/elasticsearch/logging.yml"
+ outputs.file "${packagingFiles}/etc/elasticsearch/log4j2.properties"
}
task createPidDir(type: EmptyDirTask) {
@@ -375,7 +363,7 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) {
configurationFile '/etc/elasticsearch/elasticsearch.yml'
configurationFile '/etc/elasticsearch/jvm.options'
- configurationFile '/etc/elasticsearch/logging.yml'
+ configurationFile '/etc/elasticsearch/log4j2.properties'
into('/etc/elasticsearch') {
fileMode 0750
permissionGroup 'elasticsearch'
@@ -479,6 +467,8 @@ task run(type: RunTask) {
* </dl>
*/
Map<String, String> expansionsForDistribution(distributionType) {
+ final String defaultHeapSize = "2g"
+
String footer = "# Built for ${project.name}-${project.version} " +
"(${distributionType})"
Map<String, Object> expansions = [
@@ -488,6 +478,7 @@ Map<String, String> expansionsForDistribution(distributionType) {
'path.conf': [
'tar': '$ES_HOME/config',
'zip': '$ES_HOME/config',
+ 'integ-test-zip': '$ES_HOME/config',
'def': '/etc/elasticsearch',
],
'path.env': [
@@ -498,8 +489,8 @@ Map<String, String> expansionsForDistribution(distributionType) {
'def': '',
],
- 'heap.min': "256m",
- 'heap.max': "2g",
+ 'heap.min': defaultHeapSize,
+ 'heap.max': defaultHeapSize,
'stopping.timeout': [
'rpm': 86400,
diff --git a/distribution/integ-test-zip/build.gradle b/distribution/integ-test-zip/build.gradle
index f1f3a2c26c..ae4a499efd 100644
--- a/distribution/integ-test-zip/build.gradle
+++ b/distribution/integ-test-zip/build.gradle
@@ -17,6 +17,8 @@
* under the License.
*/
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
+
task buildZip(type: Zip) {
baseName = 'elasticsearch'
with archivesFiles
@@ -30,8 +32,31 @@ artifacts {
publishing {
publications {
nebula {
+ artifactId 'elasticsearch'
artifact buildZip
}
+ /* HUGE HACK: the underlying maven publication library refuses to deploy any attached artifacts
+ * when the packaging type is set to 'pom'. But Sonatype's OSS repositories require source files
+ * for artifacts that are of type 'zip'. We already publish the source and javadoc for Elasticsearch
+ * under the various other subprojects. So here we create another publication using the same
+ * name that has the "real" pom, and rely on the fact that gradle will execute the publish tasks
+ * in alphabetical order. This lets us publish the zip file even though the pom says the
+ * type is 'pom' instead of 'zip'. We cannot set up a dependency between the tasks because the
+ * publishing tasks are created *extremely* late in the configuration phase, so we cannot get
+ * ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to
+ * maven local work, since we publish to maven central externally. */
+ nebulaRealPom(MavenPublication) {
+ artifactId 'elasticsearch'
+ pom.packaging = 'pom'
+ pom.withXml { XmlProvider xml ->
+ Node root = xml.asNode()
+ root.appendNode('name', 'Elasticsearch')
+ root.appendNode('description', 'A Distributed RESTful Search Engine')
+ root.appendNode('url', PluginBuildPlugin.urlFromOrigin(project.scminfo.origin))
+ Node scmNode = root.appendNode('scm')
+ scmNode.appendNode('url', project.scminfo.origin)
+ }
+ }
}
}
diff --git a/distribution/licenses/apache-log4j-extras-1.2.17.jar.sha1 b/distribution/licenses/apache-log4j-extras-1.2.17.jar.sha1
deleted file mode 100644
index 147721b891..0000000000
--- a/distribution/licenses/apache-log4j-extras-1.2.17.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-85863614d82185d7e51fe21c00aa9117a523a8b6
diff --git a/distribution/licenses/apache-log4j-extras-NOTICE b/distribution/licenses/apache-log4j-extras-NOTICE
deleted file mode 100644
index e02b7500ec..0000000000
--- a/distribution/licenses/apache-log4j-extras-NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Apache Extras Companion for log4j 1.2.
-Copyright 2007 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1 b/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1
new file mode 100644
index 0000000000..5bf4bcab46
--- /dev/null
+++ b/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1
@@ -0,0 +1 @@
+3b4c5a8b734b6a29b2f03380535a48da6284b210 \ No newline at end of file
diff --git a/distribution/licenses/log4j-1.2.17.jar.sha1 b/distribution/licenses/log4j-1.2.17.jar.sha1
deleted file mode 100644
index 383110e29f..0000000000
--- a/distribution/licenses/log4j-1.2.17.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5af35056b4d257e4b64b9e8069c0746e8b08629f
diff --git a/distribution/licenses/apache-log4j-extras-LICENSE b/distribution/licenses/log4j-LICENSE.txt
index 6279e5206d..6279e5206d 100644
--- a/distribution/licenses/apache-log4j-extras-LICENSE
+++ b/distribution/licenses/log4j-LICENSE.txt
diff --git a/distribution/licenses/log4j-NOTICE b/distribution/licenses/log4j-NOTICE.txt
index 0375732360..0375732360 100644
--- a/distribution/licenses/log4j-NOTICE
+++ b/distribution/licenses/log4j-NOTICE.txt
diff --git a/distribution/licenses/log4j-api-2.6.2.jar.sha1 b/distribution/licenses/log4j-api-2.6.2.jar.sha1
new file mode 100644
index 0000000000..e4f9af7497
--- /dev/null
+++ b/distribution/licenses/log4j-api-2.6.2.jar.sha1
@@ -0,0 +1 @@
+bd1b74a5d170686362091c7cf596bbc3adf5c09b \ No newline at end of file
diff --git a/distribution/licenses/log4j-LICENSE b/distribution/licenses/log4j-api-LICENSE.txt
index 6279e5206d..6279e5206d 100644
--- a/distribution/licenses/log4j-LICENSE
+++ b/distribution/licenses/log4j-api-LICENSE.txt
diff --git a/distribution/licenses/log4j-api-NOTICE.txt b/distribution/licenses/log4j-api-NOTICE.txt
new file mode 100644
index 0000000000..0375732360
--- /dev/null
+++ b/distribution/licenses/log4j-api-NOTICE.txt
@@ -0,0 +1,5 @@
+Apache log4j
+Copyright 2007 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file
diff --git a/distribution/licenses/log4j-core-2.6.2.jar.sha1 b/distribution/licenses/log4j-core-2.6.2.jar.sha1
new file mode 100644
index 0000000000..0ac4323411
--- /dev/null
+++ b/distribution/licenses/log4j-core-2.6.2.jar.sha1
@@ -0,0 +1 @@
+00a91369f655eb1639c6aece5c5eb5108db18306 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/commons-codec-LICENSE.txt b/distribution/licenses/log4j-core-LICENSE.txt
index 57bc88a15a..6279e5206d 100644
--- a/plugins/mapper-attachments/licenses/commons-codec-LICENSE.txt
+++ b/distribution/licenses/log4j-core-LICENSE.txt
@@ -1,3 +1,4 @@
+
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -186,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright [yyyy] [name of copyright owner]
+ Copyright 1999-2005 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -199,4 +200,3 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
diff --git a/distribution/licenses/log4j-core-NOTICE.txt b/distribution/licenses/log4j-core-NOTICE.txt
new file mode 100644
index 0000000000..0375732360
--- /dev/null
+++ b/distribution/licenses/log4j-core-NOTICE.txt
@@ -0,0 +1,5 @@
+Apache log4j
+Copyright 2007 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file
diff --git a/distribution/licenses/lucene-analyzers-common-6.1.0.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.1.0.jar.sha1
deleted file mode 100644
index 382bf79e91..0000000000
--- a/distribution/licenses/lucene-analyzers-common-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-20c3c3a717a225df0b462216e70a57922a8edd28 \ No newline at end of file
diff --git a/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1
new file mode 100644
index 0000000000..57aec3f4ac
--- /dev/null
+++ b/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1
@@ -0,0 +1 @@
+d254d52dd394b5079129f3d5f3bf4f2d44a5936e \ No newline at end of file
diff --git a/distribution/licenses/lucene-backward-codecs-6.1.0.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.1.0.jar.sha1
deleted file mode 100644
index f13099389d..0000000000
--- a/distribution/licenses/lucene-backward-codecs-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cbbba4d2d0c1469e0cc3358489b72922ba4963bf \ No newline at end of file
diff --git a/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1
new file mode 100644
index 0000000000..04aefc62f6
--- /dev/null
+++ b/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1
@@ -0,0 +1 @@
+b625bb21456b3c0d1e5e431bced125cb060c1abd \ No newline at end of file
diff --git a/distribution/licenses/lucene-core-6.1.0.jar.sha1 b/distribution/licenses/lucene-core-6.1.0.jar.sha1
deleted file mode 100644
index 5c0b798a77..0000000000
--- a/distribution/licenses/lucene-core-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-763361bd8cb48161ae28d67a7ca20c4abf194ecb \ No newline at end of file
diff --git a/distribution/licenses/lucene-core-6.2.0.jar.sha1 b/distribution/licenses/lucene-core-6.2.0.jar.sha1
new file mode 100644
index 0000000000..2d74124e62
--- /dev/null
+++ b/distribution/licenses/lucene-core-6.2.0.jar.sha1
@@ -0,0 +1 @@
+849ee62525a294416802be2cacc66c80352f6f13 \ No newline at end of file
diff --git a/distribution/licenses/lucene-grouping-6.1.0.jar.sha1 b/distribution/licenses/lucene-grouping-6.1.0.jar.sha1
deleted file mode 100644
index 74a68a00f0..0000000000
--- a/distribution/licenses/lucene-grouping-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c40e8a1904557f67e3d14462d64865cf4a838123 \ No newline at end of file
diff --git a/distribution/licenses/lucene-grouping-6.2.0.jar.sha1 b/distribution/licenses/lucene-grouping-6.2.0.jar.sha1
new file mode 100644
index 0000000000..6ba525a038
--- /dev/null
+++ b/distribution/licenses/lucene-grouping-6.2.0.jar.sha1
@@ -0,0 +1 @@
+9527fedfd5acc624b2bb3f862bd99fb8f470b053 \ No newline at end of file
diff --git a/distribution/licenses/lucene-highlighter-6.1.0.jar.sha1 b/distribution/licenses/lucene-highlighter-6.1.0.jar.sha1
deleted file mode 100644
index b26247fef5..0000000000
--- a/distribution/licenses/lucene-highlighter-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6ab7c27256d3cd51022fb7130eb3e92391f24cdc \ No newline at end of file
diff --git a/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1 b/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1
new file mode 100644
index 0000000000..c258e3fb85
--- /dev/null
+++ b/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1
@@ -0,0 +1 @@
+7ca342372a3f45e32bbd21cecaa757e38eccb8a5 \ No newline at end of file
diff --git a/distribution/licenses/lucene-join-6.1.0.jar.sha1 b/distribution/licenses/lucene-join-6.1.0.jar.sha1
deleted file mode 100644
index 2198e45313..0000000000
--- a/distribution/licenses/lucene-join-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-63411cef454a282c9ead56b25e0ca87daecbcf77 \ No newline at end of file
diff --git a/distribution/licenses/lucene-join-6.2.0.jar.sha1 b/distribution/licenses/lucene-join-6.2.0.jar.sha1
new file mode 100644
index 0000000000..01989e96a5
--- /dev/null
+++ b/distribution/licenses/lucene-join-6.2.0.jar.sha1
@@ -0,0 +1 @@
+da0b8de98511abd4fe9c7d48a353d17854c5ed46 \ No newline at end of file
diff --git a/distribution/licenses/lucene-memory-6.1.0.jar.sha1 b/distribution/licenses/lucene-memory-6.1.0.jar.sha1
deleted file mode 100644
index 7937bebc51..0000000000
--- a/distribution/licenses/lucene-memory-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e6702402615fcca549457842a08d21c35375a632 \ No newline at end of file
diff --git a/distribution/licenses/lucene-memory-6.2.0.jar.sha1 b/distribution/licenses/lucene-memory-6.2.0.jar.sha1
new file mode 100644
index 0000000000..b8a4a87efe
--- /dev/null
+++ b/distribution/licenses/lucene-memory-6.2.0.jar.sha1
@@ -0,0 +1 @@
+bc9e075b1ee051c8e5246c237c38d8e71dab8a66 \ No newline at end of file
diff --git a/distribution/licenses/lucene-misc-6.1.0.jar.sha1 b/distribution/licenses/lucene-misc-6.1.0.jar.sha1
deleted file mode 100644
index 87e8d94d46..0000000000
--- a/distribution/licenses/lucene-misc-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9a4884f1eaa7ac8af4abb51b45d0175ef7a3e03f \ No newline at end of file
diff --git a/distribution/licenses/lucene-misc-6.2.0.jar.sha1 b/distribution/licenses/lucene-misc-6.2.0.jar.sha1
new file mode 100644
index 0000000000..f4e081865a
--- /dev/null
+++ b/distribution/licenses/lucene-misc-6.2.0.jar.sha1
@@ -0,0 +1 @@
+94ddde6312566a4da4a50a88e453b6c82c759b41 \ No newline at end of file
diff --git a/distribution/licenses/lucene-queries-6.1.0.jar.sha1 b/distribution/licenses/lucene-queries-6.1.0.jar.sha1
deleted file mode 100644
index 687a6fa1ce..0000000000
--- a/distribution/licenses/lucene-queries-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2239ddd94a44d627b667221bdf129681af85ba51 \ No newline at end of file
diff --git a/distribution/licenses/lucene-queries-6.2.0.jar.sha1 b/distribution/licenses/lucene-queries-6.2.0.jar.sha1
new file mode 100644
index 0000000000..f7270a23af
--- /dev/null
+++ b/distribution/licenses/lucene-queries-6.2.0.jar.sha1
@@ -0,0 +1 @@
+dce47238f78e3e97d91dc6fefa9f46f07866bc2b \ No newline at end of file
diff --git a/distribution/licenses/lucene-queryparser-6.1.0.jar.sha1 b/distribution/licenses/lucene-queryparser-6.1.0.jar.sha1
deleted file mode 100644
index 69df3d6e69..0000000000
--- a/distribution/licenses/lucene-queryparser-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6b9e5e3dfc5b3c2689dcdc63d115c06be06c3837 \ No newline at end of file
diff --git a/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1 b/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1
new file mode 100644
index 0000000000..8e95aa600e
--- /dev/null
+++ b/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1
@@ -0,0 +1 @@
+17ef728ac15e668bfa1105321611548424637645 \ No newline at end of file
diff --git a/distribution/licenses/lucene-sandbox-6.1.0.jar.sha1 b/distribution/licenses/lucene-sandbox-6.1.0.jar.sha1
deleted file mode 100644
index 5790e6e19b..0000000000
--- a/distribution/licenses/lucene-sandbox-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-53d35813f366d70ae0aef99d4f567d007290bdd2 \ No newline at end of file
diff --git a/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1 b/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1
new file mode 100644
index 0000000000..1f34be3033
--- /dev/null
+++ b/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1
@@ -0,0 +1 @@
+520183f7b9aba77a26e224760c420a3844b0631a \ No newline at end of file
diff --git a/distribution/licenses/lucene-spatial-6.1.0.jar.sha1 b/distribution/licenses/lucene-spatial-6.1.0.jar.sha1
deleted file mode 100644
index 3031c7fbd9..0000000000
--- a/distribution/licenses/lucene-spatial-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-03ce415700267e5d329f2d01e599d13291aaef97 \ No newline at end of file
diff --git a/distribution/licenses/lucene-spatial-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial-6.2.0.jar.sha1
new file mode 100644
index 0000000000..22e81792e4
--- /dev/null
+++ b/distribution/licenses/lucene-spatial-6.2.0.jar.sha1
@@ -0,0 +1 @@
+8dba929b66927b936fbc76103b109ad6c824daee \ No newline at end of file
diff --git a/distribution/licenses/lucene-spatial-extras-6.1.0.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.1.0.jar.sha1
deleted file mode 100644
index 5d5c9fc40d..0000000000
--- a/distribution/licenses/lucene-spatial-extras-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-eed865fccebb3c0a1ec2bebba1eddaaf9295c385 \ No newline at end of file
diff --git a/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1
new file mode 100644
index 0000000000..d5e8f379d7
--- /dev/null
+++ b/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1
@@ -0,0 +1 @@
+3b5a6ef5cd90c0218a72e9e2f7e60104be2447da \ No newline at end of file
diff --git a/distribution/licenses/lucene-spatial3d-6.1.0.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.1.0.jar.sha1
deleted file mode 100644
index 4735753ddc..0000000000
--- a/distribution/licenses/lucene-spatial3d-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-365a48f8d019aeeea34de1e80b03344fe3d4401b \ No newline at end of file
diff --git a/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1
new file mode 100644
index 0000000000..d0ce5275a2
--- /dev/null
+++ b/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1
@@ -0,0 +1 @@
+fcdb0567725722c5145149d1502848b6a96ec18d \ No newline at end of file
diff --git a/distribution/licenses/lucene-suggest-6.1.0.jar.sha1 b/distribution/licenses/lucene-suggest-6.1.0.jar.sha1
deleted file mode 100644
index 548cba1d04..0000000000
--- a/distribution/licenses/lucene-suggest-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e39f0e09ff1e3d9874a3b072294b80cf6567e431 \ No newline at end of file
diff --git a/distribution/licenses/lucene-suggest-6.2.0.jar.sha1 b/distribution/licenses/lucene-suggest-6.2.0.jar.sha1
new file mode 100644
index 0000000000..39392ad115
--- /dev/null
+++ b/distribution/licenses/lucene-suggest-6.2.0.jar.sha1
@@ -0,0 +1 @@
+3d9d526c51f483d27f425c75d7e56bc0849242d6 \ No newline at end of file
diff --git a/distribution/rpm/build.gradle b/distribution/rpm/build.gradle
index 185f558ff0..a0dc33b9ad 100644
--- a/distribution/rpm/build.gradle
+++ b/distribution/rpm/build.gradle
@@ -36,6 +36,17 @@ task buildRpm(type: Rpm) {
fileMode 0644
addParentDirs false
// TODO ospackage doesn't support icon but we used to have one
+
+ // Declare the folders so that the RPM package manager removes
+ // them when upgrading or removing the package
+ directory('/usr/share/elasticsearch/bin', 0755)
+ directory('/usr/share/elasticsearch/lib', 0755)
+ directory('/usr/share/elasticsearch/modules', 0755)
+ modulesFiles.eachFile { FileCopyDetails fcp ->
+ if (fcp.name == "plugin-descriptor.properties") {
+ directory('/usr/share/elasticsearch/modules/' + fcp.file.parentFile.name, 0755)
+ }
+ }
}
artifacts {
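Declaring the directories puts them in the package payload, so rpm tracks them and removes them on erase or upgrade. One way to verify after building (a sketch; adjust the rpm path to your build output):

    # directories owned by the package are listed with a leading 'd'
    rpm -qlvp distribution/rpm/build/distributions/elasticsearch-*.rpm | grep '^d'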
diff --git a/distribution/src/main/packaging/scripts/prerm b/distribution/src/main/packaging/scripts/prerm
index 592f2f9948..bca03b2576 100644
--- a/distribution/src/main/packaging/scripts/prerm
+++ b/distribution/src/main/packaging/scripts/prerm
@@ -79,12 +79,13 @@ if [ "$REMOVE_SERVICE" = "true" ]; then
if command -v update-rc.d >/dev/null; then
update-rc.d elasticsearch remove >/dev/null || true
fi
-fi
-SCRIPTS_DIR="/etc/elasticsearch/scripts"
-# delete the scripts directory if and only if empty
-if [ -d "$SCRIPTS_DIR" ]; then
- rmdir --ignore-fail-on-non-empty "$SCRIPTS_DIR"
+ SCRIPTS_DIR="/etc/elasticsearch/scripts"
+ # delete the scripts directory if and only if empty
+ if [ -d "$SCRIPTS_DIR" ]; then
+ rmdir --ignore-fail-on-non-empty "$SCRIPTS_DIR"
+ fi
fi
+
${scripts.footer}
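The scripts-directory cleanup now runs only when the service itself is being removed, and rmdir --ignore-fail-on-non-empty keeps it safe either way, as this small sketch shows:

    mkdir -p /tmp/es-scripts && touch /tmp/es-scripts/example.painless
    rmdir --ignore-fail-on-non-empty /tmp/es-scripts   # non-empty: kept, exit 0
    rm /tmp/es-scripts/example.painless
    rmdir --ignore-fail-on-non-empty /tmp/es-scripts   # empty: removed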
diff --git a/distribution/src/main/packaging/systemd/elasticsearch.service b/distribution/src/main/packaging/systemd/elasticsearch.service
index 8971bc2bc1..0554371a1f 100644
--- a/distribution/src/main/packaging/systemd/elasticsearch.service
+++ b/distribution/src/main/packaging/systemd/elasticsearch.service
@@ -21,10 +21,17 @@ ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec
ExecStart=/usr/share/elasticsearch/bin/elasticsearch \
-p ${PID_DIR}/elasticsearch.pid \
+ --quiet \
-Edefault.path.logs=${LOG_DIR} \
-Edefault.path.data=${DATA_DIR} \
-Edefault.path.conf=${CONF_DIR}
+# StandardOutput is configured to redirect to the journal since
+# some error messages may be logged to standard output before the
+# Elasticsearch logging system is initialized. Elasticsearch
+# stores its logs in /var/log/elasticsearch and does not use
+# the journal by default. If you also want journal logging,
+# simply remove the "--quiet" option from ExecStart.
StandardOutput=journal
StandardError=inherit
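In practice this means early startup errors still reach the journal even though day-to-day logging stays file-based. To read them on a systemd host (a sketch):

    journalctl --unit elasticsearch --since "1 hour ago"
    # regular logs remain under /var/log/elasticsearch/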
diff --git a/distribution/src/main/resources/bin/elasticsearch b/distribution/src/main/resources/bin/elasticsearch
index b41304f70d..7b3d0ea198 100755
--- a/distribution/src/main/resources/bin/elasticsearch
+++ b/distribution/src/main/resources/bin/elasticsearch
@@ -54,35 +54,6 @@ EOF
exit 1
fi
-# TODO: remove for Elasticsearch 6.x
-unsupported_environment_variable() {
- if test -n "$1"; then
- echo "$2=$1: $3"
- fi
-}
-
-if test -n "$ES_MIN_MEM" ||
- test -n "$ES_MAX_MEM" ||
- test -n "$ES_HEAP_SIZE" ||
- test -n "$ES_HEAP_NEWSIZE" ||
- test -n "$ES_DIRECT_SIZE" ||
- test -n "$ES_USE_IPV4" ||
- test -n "$ES_GC_OPTS" ||
- test -n "$ES_GC_LOG_FILE"; then
- echo "Error: encountered environment variables that are no longer supported"
- echo "Use jvm.options or ES_JAVA_OPTS to configure the JVM"
- unsupported_environment_variable "$ES_MIN_MEM" ES_MIN_MEM "set -Xms$ES_MIN_MEM in jvm.options or add \"-Xms$ES_MIN_MEM\" to ES_JAVA_OPTS"
- unsupported_environment_variable "$ES_MAX_MEM" ES_MAX_MEM "set -Xmx$ES_MAX_MEM in jvm.options or add \"-Xmx$ES_MAX_MEM\" to ES_JAVA_OPTS"
- unsupported_environment_variable "$ES_HEAP_SIZE" ES_HEAP_SIZE "set -Xms$ES_HEAP_SIZE and -Xmx$ES_HEAP_SIZE in jvm.options or add \"-Xms$ES_HEAP_SIZE -Xmx$ES_HEAP_SIZE\" to ES_JAVA_OPTS"
- unsupported_environment_variable "$ES_HEAP_NEWSIZE" ES_HEAP_NEWSIZE "set -Xmn$ES_HEAP_NEWSIZE in jvm.options or add \"-Xmn$ES_HEAP_SIZE\" to ES_JAVA_OPTS"
- unsupported_environment_variable "$ES_DIRECT_SIZE" ES_DIRECT_SIZE "set -XX:MaxDirectMemorySize=$ES_DIRECT_SIZE in jvm.options or add \"-XX:MaxDirectMemorySize=$ES_DIRECT_SIZE\" to ES_JAVA_OPTS"
- unsupported_environment_variable "$ES_USE_IPV4" ES_USE_IPV4 "set -Djava.net.preferIPv4Stack=true in jvm.options or add \"-Djava.net.preferIPv4Stack=true\" to ES_JAVA_OPTS"
- unsupported_environment_variable "$ES_GC_OPTS" ES_GC_OPTS "set ${ES_GC_OPTS// / and } in jvm.options or add \"$ES_GC_OPTS\" to ES_JAVA_OPTS"
- unsupported_environment_variable "$ES_GC_LOG_FILE" ES_GC_LOG_FILE "set -Xloggc:$ES_GC_LOG_FILE in jvm.options or add \"-Xloggc:$ES_GC_LOG_FILE\" to ES_JAVA_OPTS"
- exit 1
-fi
-# end TODO: remove for Elasticsearch 6.x
-
parse_jvm_options() {
if [ -f "$1" ]; then
echo "$(grep "^-" "$1" | tr '\n' ' ')"
diff --git a/distribution/src/main/resources/bin/elasticsearch-plugin b/distribution/src/main/resources/bin/elasticsearch-plugin
index 06f8c5b8c2..098d912449 100755
--- a/distribution/src/main/resources/bin/elasticsearch-plugin
+++ b/distribution/src/main/resources/bin/elasticsearch-plugin
@@ -82,9 +82,10 @@ HOSTNAME=`hostname | cut -d. -f1`
export HOSTNAME
declare -a args=("$@")
+path_props=(-Des.path.home="$ES_HOME")
if [ -e "$CONF_DIR" ]; then
- args=("${args[@]}" -Edefault.path.conf="$CONF_DIR")
+ path_props=("${path_props[@]}" -Des.path.conf="$CONF_DIR")
fi
-exec "$JAVA" $ES_JAVA_OPTS -Delasticsearch -Des.path.home="$ES_HOME" -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginCli "${args[@]}"
+exec "$JAVA" $ES_JAVA_OPTS -Delasticsearch "${path_props[@]}" -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginCli "${args[@]}"
diff --git a/distribution/src/main/resources/bin/elasticsearch-plugin.bat b/distribution/src/main/resources/bin/elasticsearch-plugin.bat
index ba35ad1c21..58b749d6b8 100644
--- a/distribution/src/main/resources/bin/elasticsearch-plugin.bat
+++ b/distribution/src/main/resources/bin/elasticsearch-plugin.bat
@@ -17,45 +17,14 @@ for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI
TITLE Elasticsearch Plugin Manager ${project.version}
-SET properties=
-SET args=
-
-:loop
-SET "current=%~1"
-SHIFT
-IF "x!current!" == "x" GOTO breakloop
-
-IF "!current:~0,2%!" == "-D" (
- ECHO "!current!" | FINDSTR /C:"=">nul && (
- :: current matches -D*=*
- IF "x!properties!" NEQ "x" (
- SET properties=!properties! "!current!"
- ) ELSE (
- SET properties="!current!"
- )
- ) || (
- :: current matches -D*
- IF "x!properties!" NEQ "x" (
- SET properties=!properties! "!current!=%~1"
- ) ELSE (
- SET properties="!current!=%~1"
- )
- SHIFT
- )
-) ELSE (
- :: current matches *
- IF "x!args!" NEQ "x" (
- SET args=!args! "!current!"
- ) ELSE (
- SET args="!current!"
- )
+SET path_props=-Des.path.home="%ES_HOME%"
+IF DEFINED CONF_DIR (
+ SET path_props=!path_props! -Des.path.conf="%CONF_DIR%"
)
-GOTO loop
-:breakloop
-
+SET args=%*
SET HOSTNAME=%COMPUTERNAME%
-"%JAVA%" %ES_JAVA_OPTS% -Des.path.home="%ES_HOME%" !properties! -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginCli" !args!
+"%JAVA%" %ES_JAVA_OPTS% !path_props! -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginCli" !args!
ENDLOCAL
diff --git a/distribution/src/main/resources/bin/service.bat b/distribution/src/main/resources/bin/elasticsearch-service.bat
index ccb2d2b7ef..609b8bda84 100644
--- a/distribution/src/main/resources/bin/service.bat
+++ b/distribution/src/main/resources/bin/elasticsearch-service.bat
@@ -3,31 +3,6 @@ SETLOCAL enabledelayedexpansion
TITLE Elasticsearch Service ${project.version}
-rem TODO: remove for Elasticsearch 6.x
-set bad_env_var=0
-if not "%ES_MIN_MEM%" == "" set bad_env_var=1
-if not "%ES_MAX_MEM%" == "" set bad_env_var=1
-if not "%ES_HEAP_SIZE%" == "" set bad_env_var=1
-if not "%ES_HEAP_NEWSIZE%" == "" set bad_env_var=1
-if not "%ES_DIRECT_SIZE%" == "" set bad_env_var=1
-if not "%ES_USE_IPV4%" == "" set bad_env_var=1
-if not "%ES_GC_OPTS%" == "" set bad_env_var=1
-if not "%ES_GC_LOG_FILE%" == "" set bad_env_var=1
-if %bad_env_var% == 1 (
- echo Error: encountered environment variables that are no longer supported
- echo Use jvm.options or ES_JAVA_OPTS to configure the JVM
- if not "%ES_MIN_MEM%" == "" echo ES_MIN_MEM=%ES_MIN_MEM%: set -Xms%ES_MIN_MEM% in jvm.options or add "-Xms%ES_MIN_MEM%" to ES_JAVA_OPTS
- if not "%ES_MAX_MEM%" == "" echo ES_MAX_MEM=%ES_MAX_MEM%: set -Xms%ES_MAX_MEM% in jvm.options or add "-Xmx%ES_MAX_MEM%" to ES_JAVA_OPTS
- if not "%ES_HEAP_SIZE%" == "" echo ES_HEAP_SIZE=%ES_HEAP_SIZE%: set -Xms%ES_HEAP_SIZE% and -Xmx%ES_HEAP_SIZE% in jvm.options or add "-Xms%ES_HEAP_SIZE% -Xmx%ES_HEAP_SIZE%" to ES_JAVA_OPTS
- if not "%ES_HEAP_NEWSIZE%" == "" echo ES_HEAP_NEWSIZE=%ES_HEAP_NEWSIZE%: set -Xmn%ES_HEAP_NEWSIZE% in jvm.options or add "-Xmn%ES_HEAP_SIZE%" to ES_JAVA_OPTS
- if not "%ES_DIRECT_SIZE%" == "" echo ES_DIRECT_SIZE=%ES_DIRECT_SIZE%: set -XX:MaxDirectMemorySize=%ES_DIRECT_SIZE% in jvm.options or add "-XX:MaxDirectMemorySize=%ES_DIRECT_SIZE%" to ES_JAVA_OPTS
- if not "%ES_USE_IPV4%" == "" echo ES_USE_IPV4=%ES_USE_IPV4%: set -Djava.net.preferIPv4Stack=true in jvm.options or add "-Djava.net.preferIPv4Stack=true" to ES_JAVA_OPTS
- if not "%ES_GC_OPTS%" == "" echo ES_GC_OPTS=%ES_GC_OPTS%: set %ES_GC_OPTS: = and % in jvm.options or add "%ES_GC_OPTS%" to ES_JAVA_OPTS
- if not "%ES_GC_LOG_FILE%" == "" echo ES_GC_LOG_FILE=%ES_GC_LOG_FILE%: set -Xloggc:%ES_GC_LOG_FILE% in jvm.options or add "-Xloggc:%ES_GC_LOG_FILE%" to ES_JAVA_OPTS"
- exit /b 1
-)
-rem end TODO: remove for Elasticsearch 6.x
-
IF DEFINED JAVA_HOME (
SET JAVA=%JAVA_HOME%\bin\java.exe
) ELSE (
@@ -100,7 +75,7 @@ echo Unknown option "%SERVICE_CMD%"
:displayUsage
echo.
-echo Usage: service.bat install^|remove^|start^|stop^|manager [SERVICE_ID]
+echo Usage: elasticsearch-service.bat install^|remove^|start^|stop^|manager [SERVICE_ID]
goto:eof
:doStart
diff --git a/distribution/src/main/resources/bin/elasticsearch.bat b/distribution/src/main/resources/bin/elasticsearch.bat
index 416bb37c85..35dbcf8e17 100644
--- a/distribution/src/main/resources/bin/elasticsearch.bat
+++ b/distribution/src/main/resources/bin/elasticsearch.bat
@@ -5,31 +5,6 @@ TITLE Elasticsearch ${project.version}
SET params='%*'
-rem TODO: remove for Elasticsearch 6.x
-set bad_env_var=0
-if not "%ES_MIN_MEM%" == "" set bad_env_var=1
-if not "%ES_MAX_MEM%" == "" set bad_env_var=1
-if not "%ES_HEAP_SIZE%" == "" set bad_env_var=1
-if not "%ES_HEAP_NEWSIZE%" == "" set bad_env_var=1
-if not "%ES_DIRECT_SIZE%" == "" set bad_env_var=1
-if not "%ES_USE_IPV4%" == "" set bad_env_var=1
-if not "%ES_GC_OPTS%" == "" set bad_env_var=1
-if not "%ES_GC_LOG_FILE%" == "" set bad_env_var=1
-if %bad_env_var% == 1 (
- echo Error: encountered environment variables that are no longer supported
- echo Use jvm.options or ES_JAVA_OPTS to configure the JVM
- if not "%ES_MIN_MEM%" == "" echo ES_MIN_MEM=%ES_MIN_MEM%: set -Xms%ES_MIN_MEM% in jvm.options or add "-Xms%ES_MIN_MEM%" to ES_JAVA_OPTS
- if not "%ES_MAX_MEM%" == "" echo ES_MAX_MEM=%ES_MAX_MEM%: set -Xmx%ES_MAX_MEM% in jvm.options or add "-Xmx%ES_MAX_MEM%" to ES_JAVA_OPTS
- if not "%ES_HEAP_SIZE%" == "" echo ES_HEAP_SIZE=%ES_HEAP_SIZE%: set -Xms%ES_HEAP_SIZE% and -Xmx%ES_HEAP_SIZE% in jvm.options or add "-Xms%ES_HEAP_SIZE% -Xmx%ES_HEAP_SIZE%" to ES_JAVA_OPTS
- if not "%ES_HEAP_NEWSIZE%" == "" echo ES_HEAP_NEWSIZE=%ES_HEAP_NEWSIZE%: set -Xmn%ES_HEAP_NEWSIZE% in jvm.options or add "-Xmn%ES_HEAP_SIZE%" to ES_JAVA_OPTS
- if not "%ES_DIRECT_SIZE%" == "" echo ES_DIRECT_SIZE=%ES_DIRECT_SIZE%: set -XX:MaxDirectMemorySize=%ES_DIRECT_SIZE% in jvm.options or add "-XX:MaxDirectMemorySize=%ES_DIRECT_SIZE%" to ES_JAVA_OPTS
- if not "%ES_USE_IPV4%" == "" echo ES_USE_IPV4=%ES_USE_IPV4%: set -Djava.net.preferIPv4Stack=true in jvm.options or add "-Djava.net.preferIPv4Stack=true" to ES_JAVA_OPTS
- if not "%ES_GC_OPTS%" == "" echo ES_GC_OPTS=%ES_GC_OPTS%: set %ES_GC_OPTS: = and % in jvm.options or add "%ES_GC_OPTS%" to ES_JAVA_OPTS
- if not "%ES_GC_LOG_FILE%" == "" echo ES_GC_LOG_FILE=%ES_GC_LOG_FILE%: set -Xloggc:%ES_GC_LOG_FILE% in jvm.options or add "-Xloggc:%ES_GC_LOG_FILE%" to ES_JAVA_OPTS"
- exit /b 1
-)
-rem end TODO: remove for Elasticsearch 6.x
-
:loop
FOR /F "usebackq tokens=1* delims= " %%A IN (!params!) DO (
SET current=%%A
diff --git a/distribution/src/main/resources/config/elasticsearch.yml b/distribution/src/main/resources/config/elasticsearch.yml
index 27e038ab3b..0abff999bc 100644
--- a/distribution/src/main/resources/config/elasticsearch.yml
+++ b/distribution/src/main/resources/config/elasticsearch.yml
@@ -14,33 +14,33 @@
#
# Use a descriptive name for your cluster:
#
-# cluster.name: my-application
+#cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
-# node.name: node-1
+#node.name: node-1
#
# Add custom attributes to the node:
#
-# node.rack: r1
+#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
-# path.data: /path/to/data
+#path.data: /path/to/data
#
# Path to log files:
#
-# path.logs: /path/to/logs
+#path.logs: /path/to/logs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
-# bootstrap.memory_lock: true
+#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
@@ -52,11 +52,11 @@
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
-# network.host: 192.168.0.1
+#network.host: 192.168.0.1
#
# Set a custom port for HTTP:
#
-# http.port: 9200
+#http.port: 9200
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
@@ -66,11 +66,11 @@
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
-# discovery.zen.ping.unicast.hosts: ["host1", "host2"]
+#discovery.zen.ping.unicast.hosts: ["host1", "host2"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
#
-# discovery.zen.minimum_master_nodes: 3
+#discovery.zen.minimum_master_nodes: 3
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
@@ -79,7 +79,7 @@
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
-# gateway.recover_after_nodes: 3
+#gateway.recover_after_nodes: 3
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
@@ -88,8 +88,8 @@
#
# Disable starting multiple nodes on a single system:
#
-# node.max_local_storage_nodes: 1
+#node.max_local_storage_nodes: 1
#
# Require explicit names when deleting indices:
#
-# action.destructive_requires_name: true
+#action.destructive_requires_name: true
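Besides the comment-style normalization, note the rename from node.rack to the node.attr.* namespace. Uncommenting a sample setting in place might look like this (a sketch against the default config path):

    sed -i 's|^#node.attr.rack: r1|node.attr.rack: r1|' /etc/elasticsearch/elasticsearch.yml
    grep '^node.attr' /etc/elasticsearch/elasticsearch.yml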
diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/main/resources/config/jvm.options
index 2feba02550..63245f172b 100644
--- a/distribution/src/main/resources/config/jvm.options
+++ b/distribution/src/main/resources/config/jvm.options
@@ -59,8 +59,14 @@
# use our provided JNA always versus the system one
-Djna.nosys=true
-# flag to explicitly tell Netty to not use unsafe
+# flags to keep Netty from being unsafe
-Dio.netty.noUnsafe=true
+-Dio.netty.noKeySetOptimization=true
+
+# log4j 2
+-Dlog4j.shutdownHookEnabled=false
+-Dlog4j2.disable.jmx=true
+-Dlog4j.skipJansi=true
## heap dumps
@@ -84,11 +90,3 @@
# log GC status to a file with time stamps
# ensure the directory exists
#-Xloggc:${loggc}
-
-# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
-# If documents were already indexed with unquoted fields in a previous version
-# of Elasticsearch, some operations may throw errors.
-#
-# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
-# only for migration purposes.
-#-Delasticsearch.json.allow_unquoted_field_names=true
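A quick way to confirm the new Netty and log4j flags reach a running JVM (a sketch; the pid-file path below is the packaging default and may differ on your install):

    ps -o command= -p "$(cat /var/run/elasticsearch/elasticsearch.pid)" \
        | tr ' ' '\n' | grep -Ei 'netty|log4j'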
diff --git a/distribution/src/main/resources/config/log4j2.properties b/distribution/src/main/resources/config/log4j2.properties
new file mode 100644
index 0000000000..9a3147f5a2
--- /dev/null
+++ b/distribution/src/main/resources/config/log4j2.properties
@@ -0,0 +1,74 @@
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs}.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
+appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
+appender.deprecation_rolling.layout.type = PatternLayout
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
+appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
+appender.index_search_slowlog_rolling.layout.type = PatternLayout
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.time.interval = 1
+appender.index_search_slowlog_rolling.policies.time.modulate = true
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
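With ${sys:es.logs} set by the startup scripts to <path.logs>/<cluster.name>, this config yields one daily-rolling main log plus deprecation and slowlog files; with the defaults that looks like:

    ls /var/log/elasticsearch/
    # elasticsearch.log                        (rolls daily)
    # elasticsearch_deprecation.log            (rolls at 1GB, keeps 4 archives)
    # elasticsearch_index_search_slowlog.log
    # elasticsearch_index_indexing_slowlog.log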
diff --git a/distribution/src/main/resources/config/logging.yml b/distribution/src/main/resources/config/logging.yml
deleted file mode 100644
index 11cd181ebd..0000000000
--- a/distribution/src/main/resources/config/logging.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-# you can override using a command-line parameter
-# -E logger.level=(ERROR|WARN|INFO|DEBUG|TRACE)
-logger.level: INFO
-rootLogger: ${logger.level}, console, file
-logger:
- # log action execution errors for easier debugging
- action: DEBUG
-
- # deprecation logging, turn to DEBUG to see them
- deprecation: INFO, deprecation_log_file
-
- # reduce the logging for aws, too much is logged under the default INFO
- com.amazonaws: WARN
- # aws will try to do some sketchy JMX stuff, but its not needed.
- com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
- com.amazonaws.metrics.AwsSdkMetrics: ERROR
-
- org.apache.http: INFO
-
- # gateway
- #gateway: DEBUG
- #index.gateway: DEBUG
-
- # peer shard recovery
- #indices.recovery: DEBUG
-
- # discovery
- #discovery: TRACE
-
- index.search.slowlog: TRACE, index_search_slow_log_file
- index.indexing.slowlog: TRACE, index_indexing_slow_log_file
-
-additivity:
- index.search.slowlog: false
- index.indexing.slowlog: false
- deprecation: false
-
-appender:
- console:
- type: console
- layout:
- type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
- file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
-
- # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
- # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
- #file:
- #type: extrasRollingFile
- #file: ${path.logs}/${cluster.name}.log
- #rollingPolicy: timeBased
- #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
- #layout:
- #type: pattern
- #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
- deprecation_log_file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}_deprecation.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
- index_search_slow_log_file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}_index_search_slowlog.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
-
- index_indexing_slow_log_file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/distribution/tar/build.gradle b/distribution/tar/build.gradle
index 7242565905..8e34a6ce9c 100644
--- a/distribution/tar/build.gradle
+++ b/distribution/tar/build.gradle
@@ -28,9 +28,5 @@ task buildTar(type: Tar) {
artifacts {
'default' buildTar
- project.afterEvaluate {
- // gradle is broken for extensions that contain a dot, so we must be explicit about the name of the .asc file
- project.signArchives.singleSignature.type = 'tar.gz.asc'
- }
}
diff --git a/distribution/zip/build.gradle b/distribution/zip/build.gradle
index 67f99aa884..ae4a499efd 100644
--- a/distribution/zip/build.gradle
+++ b/distribution/zip/build.gradle
@@ -17,6 +17,8 @@
* under the License.
*/
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
+
task buildZip(type: Zip) {
baseName = 'elasticsearch'
with archivesFiles
@@ -30,10 +32,32 @@ artifacts {
publishing {
publications {
nebula {
+ artifactId 'elasticsearch'
artifact buildZip
}
+ /* HUGE HACK: the underlying maven publication library refuses to deploy any attached artifacts
+ * when the packaging type is set to 'pom'. But Sonatype's OSS repositories require source files
+ * for artifacts that are of type 'zip'. We already publish the source and javadoc for Elasticsearch
+ * under the various other subprojects. So here we create another publication using the same
+ * name that has the "real" pom, and rely on the fact that gradle will execute the publish tasks
+ * in alphabetical order. This lets us publish the zip file even though the pom says the
+ * type is 'pom' instead of 'zip'. We cannot set up a dependency between the tasks because the
+ * publishing tasks are created *extremely* late in the configuration phase, so we cannot get
+ * hold of the actual task. Furthermore, this entire hack only exists so we can make publishing to
+ * maven local work, since we publish to maven central externally. */
+ nebulaRealPom(MavenPublication) {
+ artifactId 'elasticsearch'
+ pom.packaging = 'pom'
+ pom.withXml { XmlProvider xml ->
+ Node root = xml.asNode()
+ root.appendNode('name', 'Elasticsearch')
+ root.appendNode('description', 'A Distributed RESTful Search Engine')
+ root.appendNode('url', PluginBuildPlugin.urlFromOrigin(project.scminfo.origin))
+ Node scmNode = root.appendNode('scm')
+ scmNode.appendNode('url', project.scminfo.origin)
+ }
+ }
}
}
integTest.dependsOn buildZip
-
diff --git a/docs/README.asciidoc b/docs/README.asciidoc
index 5da211c662..fbf07811d8 100644
--- a/docs/README.asciidoc
+++ b/docs/README.asciidoc
@@ -37,6 +37,9 @@ are tests even if they don't have `// CONSOLE`.
`// TEST[continued]` you can make tests that contain multiple command snippets
and multiple response snippets.
* `// TESTRESPONSE[s/foo/bar/]`: Substitutions. See `// TEST[s/foo/bar]`.
+ * `// TESTRESPONSE[_cat]`: Add substitutions for testing `_cat` responses. Use
+ this after all other substitutions so it doesn't make other substitutions
+ difficult.
* `// TESTSETUP`: Marks this snippet as the "setup" for all other snippets in
this file. This is a somewhat natural way of structuring documentation. You
say "this is the data we use to explain this feature" then you add the
diff --git a/docs/build.gradle b/docs/build.gradle
index caf7cfea01..2d550e1f19 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -19,11 +19,171 @@
apply plugin: 'elasticsearch.docs-test'
+/* List of files that have snippets that probably should be converted to
+ * `// CONSOLE` and `// TESTRESPONSE` but have yet to be converted. Try and
+ * only remove entries from this list. When it is empty we'll remove it
+ * entirely and have a party! There will be cake and everything.... */
+buildRestTests.expectedUnconvertedCandidates = [
+ 'reference/aggregations/bucket/datehistogram-aggregation.asciidoc',
+ 'reference/aggregations/bucket/daterange-aggregation.asciidoc',
+ 'reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc',
+ 'reference/aggregations/bucket/filter-aggregation.asciidoc',
+ 'reference/aggregations/bucket/filters-aggregation.asciidoc',
+ 'reference/aggregations/bucket/geodistance-aggregation.asciidoc',
+ 'reference/aggregations/bucket/geohashgrid-aggregation.asciidoc',
+ 'reference/aggregations/bucket/global-aggregation.asciidoc',
+ 'reference/aggregations/bucket/histogram-aggregation.asciidoc',
+ 'reference/aggregations/bucket/iprange-aggregation.asciidoc',
+ 'reference/aggregations/bucket/missing-aggregation.asciidoc',
+ 'reference/aggregations/bucket/nested-aggregation.asciidoc',
+ 'reference/aggregations/bucket/range-aggregation.asciidoc',
+ 'reference/aggregations/bucket/reverse-nested-aggregation.asciidoc',
+ 'reference/aggregations/bucket/sampler-aggregation.asciidoc',
+ 'reference/aggregations/bucket/significantterms-aggregation.asciidoc',
+ 'reference/aggregations/bucket/terms-aggregation.asciidoc',
+ 'reference/aggregations/matrix/stats-aggregation.asciidoc',
+ 'reference/aggregations/metrics/avg-aggregation.asciidoc',
+ 'reference/aggregations/metrics/cardinality-aggregation.asciidoc',
+ 'reference/aggregations/metrics/extendedstats-aggregation.asciidoc',
+ 'reference/aggregations/metrics/geobounds-aggregation.asciidoc',
+ 'reference/aggregations/metrics/geocentroid-aggregation.asciidoc',
+ 'reference/aggregations/metrics/max-aggregation.asciidoc',
+ 'reference/aggregations/metrics/min-aggregation.asciidoc',
+ 'reference/aggregations/metrics/percentile-aggregation.asciidoc',
+ 'reference/aggregations/metrics/percentile-rank-aggregation.asciidoc',
+ 'reference/aggregations/metrics/scripted-metric-aggregation.asciidoc',
+ 'reference/aggregations/metrics/stats-aggregation.asciidoc',
+ 'reference/aggregations/metrics/sum-aggregation.asciidoc',
+ 'reference/aggregations/metrics/tophits-aggregation.asciidoc',
+ 'reference/aggregations/metrics/valuecount-aggregation.asciidoc',
+ 'reference/aggregations/pipeline.asciidoc',
+ 'reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/bucket-script-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/derivative-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/max-bucket-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/min-bucket-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/movavg-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/serial-diff-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc',
+ 'reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc',
+ 'reference/analysis/analyzers/lang-analyzer.asciidoc',
+ 'reference/analysis/analyzers/pattern-analyzer.asciidoc',
+ 'reference/analysis/charfilters/htmlstrip-charfilter.asciidoc',
+ 'reference/analysis/charfilters/pattern-replace-charfilter.asciidoc',
+ 'reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/elision-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/stop-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc',
+ 'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc',
+ 'reference/cat.asciidoc',
+ 'reference/cat/alias.asciidoc',
+ 'reference/cat/allocation.asciidoc',
+ 'reference/cat/count.asciidoc',
+ 'reference/cat/fielddata.asciidoc',
+ 'reference/cat/health.asciidoc',
+ 'reference/cat/indices.asciidoc',
+ 'reference/cat/master.asciidoc',
+ 'reference/cat/nodeattrs.asciidoc',
+ 'reference/cat/nodes.asciidoc',
+ 'reference/cat/pending_tasks.asciidoc',
+ 'reference/cat/plugins.asciidoc',
+ 'reference/cat/recovery.asciidoc',
+ 'reference/cat/repositories.asciidoc',
+ 'reference/cat/segments.asciidoc',
+ 'reference/cat/shards.asciidoc',
+ 'reference/cat/snapshots.asciidoc',
+ 'reference/cat/templates.asciidoc',
+ 'reference/cat/thread_pool.asciidoc',
+ 'reference/cluster/allocation-explain.asciidoc',
+ 'reference/cluster/nodes-info.asciidoc',
+ 'reference/cluster/nodes-stats.asciidoc',
+ 'reference/cluster/pending.asciidoc',
+ 'reference/cluster/reroute.asciidoc',
+ 'reference/cluster/state.asciidoc',
+ 'reference/cluster/stats.asciidoc',
+ 'reference/cluster/tasks.asciidoc',
+ 'reference/cluster/update-settings.asciidoc',
+ 'reference/docs/bulk.asciidoc',
+ 'reference/docs/delete-by-query.asciidoc',
+ 'reference/docs/delete.asciidoc',
+ 'reference/docs/index_.asciidoc',
+ 'reference/docs/multi-get.asciidoc',
+ 'reference/docs/multi-termvectors.asciidoc',
+ 'reference/docs/reindex.asciidoc',
+ 'reference/docs/termvectors.asciidoc',
+ 'reference/docs/update-by-query.asciidoc',
+ 'reference/docs/update.asciidoc',
+ 'reference/getting-started.asciidoc',
+ 'reference/index-modules/similarity.asciidoc',
+ 'reference/index-modules/store.asciidoc',
+ 'reference/index-modules/translog.asciidoc',
+ 'reference/indices/analyze.asciidoc',
+ 'reference/indices/flush.asciidoc',
+ 'reference/indices/get-field-mapping.asciidoc',
+ 'reference/indices/get-settings.asciidoc',
+ 'reference/indices/put-mapping.asciidoc',
+ 'reference/indices/recovery.asciidoc',
+ 'reference/indices/segments.asciidoc',
+ 'reference/indices/shadow-replicas.asciidoc',
+ 'reference/indices/shard-stores.asciidoc',
+ 'reference/indices/update-settings.asciidoc',
+ 'reference/indices/upgrade.asciidoc',
+ 'reference/ingest/ingest-node.asciidoc',
+ 'reference/mapping/dynamic/templates.asciidoc',
+ 'reference/mapping/fields/all-field.asciidoc',
+ 'reference/mapping/params/analyzer.asciidoc',
+ 'reference/mapping/types/binary.asciidoc',
+ 'reference/mapping/types/geo-point.asciidoc',
+ 'reference/mapping/types/geo-shape.asciidoc',
+ 'reference/mapping/types/ip.asciidoc',
+ 'reference/mapping/types/nested.asciidoc',
+ 'reference/mapping/types/object.asciidoc',
+ 'reference/mapping/types/percolator.asciidoc',
+ 'reference/modules/scripting/native.asciidoc',
+ 'reference/modules/scripting/security.asciidoc',
+ 'reference/modules/scripting/using.asciidoc',
+ 'reference/modules/transport.asciidoc',
+ 'reference/query-dsl/exists-query.asciidoc',
+ 'reference/query-dsl/function-score-query.asciidoc',
+ 'reference/query-dsl/geo-shape-query.asciidoc',
+ 'reference/query-dsl/terms-query.asciidoc',
+ 'reference/search/field-stats.asciidoc',
+ 'reference/search/multi-search.asciidoc',
+ 'reference/search/profile.asciidoc',
+ 'reference/search/request/highlighting.asciidoc',
+ 'reference/search/request/inner-hits.asciidoc',
+ 'reference/search/request/rescore.asciidoc',
+ 'reference/search/request/scroll.asciidoc',
+ 'reference/search/search-template.asciidoc',
+ 'reference/search/suggesters/completion-suggest.asciidoc',
+]
+
integTest {
cluster {
setting 'script.inline', 'true'
setting 'script.stored', 'true'
setting 'script.max_compilations_per_minute', '1000'
+ /* Enable regexes in painless so our tests don't complain about example
+ * snippets that use them. */
+ setting 'script.painless.regex.enabled', 'true'
Closure configFile = {
extraConfigFile it, "src/test/cluster/config/$it"
}
@@ -65,18 +225,18 @@ buildRestTests.docs = fileTree(projectDir) {
Closure setupTwitter = { String name, int count ->
buildRestTests.setups[name] = '''
- do:
- indices.create:
- index: twitter
- body:
- settings:
- number_of_shards: 1
- number_of_replicas: 1
+ indices.create:
+ index: twitter
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 1
- do:
- bulk:
- index: twitter
- type: tweet
- refresh: true
- body: |'''
+ bulk:
+ index: twitter
+ type: tweet
+ refresh: true
+ body: |'''
for (int i = 0; i < count; i++) {
String user, text
if (i == 0) {
@@ -87,12 +247,13 @@ Closure setupTwitter = { String name, int count ->
text = "some message with the number $i"
}
buildRestTests.setups[name] += """
- {"index":{"_id": "$i"}}
- {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
+ {"index":{"_id": "$i"}}
+ {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
}
}
setupTwitter('twitter', 5)
setupTwitter('big_twitter', 120)
+setupTwitter('huge_twitter', 1200)
buildRestTests.setups['host'] = '''
# Fetch the http host. We use the host of the master because we know there will always be a master.
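The whitelist and setups above drive the REST tests generated from the doc snippets; to run them locally (a sketch — the integTest task is configured a few lines up, and the project path is an assumption):

    gradle :docs:integTest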
diff --git a/docs/groovy-api/anatomy.asciidoc b/docs/groovy-api/anatomy.asciidoc
index 33d8ef72f3..99e008fb6e 100644
--- a/docs/groovy-api/anatomy.asciidoc
+++ b/docs/groovy-api/anatomy.asciidoc
@@ -17,7 +17,7 @@ elasticsearch Java `ActionFuture` (in turn a nicer extension to Java own
`Future`) which allows to register listeners (closures) on it for
success and failures, as well as blocking for the response. For example:
-[source,js]
+[source,groovy]
--------------------------------------------------
def indexR = client.index {
index "test"
@@ -38,7 +38,7 @@ println "Indexed $indexR.response.id into $indexR.response.index/$indexR.respons
In the above example, calling `indexR.response` will simply block for
the response. We can also block for the response for a specific timeout:
-[source,js]
+[source,groovy]
--------------------------------------------------
IndexResponse response = indexR.response "5s" // block for 5 seconds, same as:
response = indexR.response 5, TimeValue.SECONDS //
@@ -47,7 +47,7 @@ response = indexR.response 5, TimeValue.SECONDS //
We can also register closures that will be called on success and on
failure:
-[source,js]
+[source,groovy]
--------------------------------------------------
indexR.success = {IndexResponse response ->
println "Indexed $response.id into $response.index/$response.type"
@@ -65,7 +65,7 @@ This option allows to pass the actual instance of the request (instead
of a closure) as a parameter. The rest is similar to the closure as a
parameter option (the `GActionFuture` handling). For example:
-[source,js]
+[source,groovy]
--------------------------------------------------
def indexR = client.index (new IndexRequest(
index: "test",
@@ -90,7 +90,7 @@ The last option is to provide an actual instance of the API request, and
an `ActionListener` for the callback. This is exactly like the Java API
with the added `gexecute` which returns the `GActionFuture`:
-[source,js]
+[source,groovy]
--------------------------------------------------
def indexR = node.client.prepareIndex("test", "type1", "1").setSource({
test = "value"
diff --git a/docs/groovy-api/client.asciidoc b/docs/groovy-api/client.asciidoc
index c0a6d68841..a2745f459b 100644
--- a/docs/groovy-api/client.asciidoc
+++ b/docs/groovy-api/client.asciidoc
@@ -13,7 +13,7 @@ within the cluster.
A Node based client is the simplest form to get a `GClient` to start
executing operations against elasticsearch.
-[source,js]
+[source,groovy]
--------------------------------------------------
import org.elasticsearch.groovy.client.GClient
import org.elasticsearch.groovy.node.GNode
@@ -33,7 +33,7 @@ Since elasticsearch allows to configure it using JSON based settings,
the configuration itself can be done using a closure that represent the
JSON:
-[source,js]
+[source,groovy]
--------------------------------------------------
import org.elasticsearch.groovy.node.GNode
import org.elasticsearch.groovy.node.GNodeBuilder
diff --git a/docs/groovy-api/delete.asciidoc b/docs/groovy-api/delete.asciidoc
index e332012696..18f11e6777 100644
--- a/docs/groovy-api/delete.asciidoc
+++ b/docs/groovy-api/delete.asciidoc
@@ -6,7 +6,7 @@ The delete API is very similar to the
Java delete API, here is an
example:
-[source,js]
+[source,groovy]
--------------------------------------------------
def deleteF = node.client.delete {
index "test"
diff --git a/docs/groovy-api/get.asciidoc b/docs/groovy-api/get.asciidoc
index 6bf476c16a..824c18f8f1 100644
--- a/docs/groovy-api/get.asciidoc
+++ b/docs/groovy-api/get.asciidoc
@@ -7,7 +7,7 @@ Java get API. The main benefit
of using groovy is handling the source content. It can be automatically
converted to a `Map` which means using Groovy to navigate it is simple:
-[source,js]
+[source,groovy]
--------------------------------------------------
def getF = node.client.get {
index "test"
diff --git a/docs/groovy-api/index_.asciidoc b/docs/groovy-api/index_.asciidoc
index cd7f0ca4ac..b63a212352 100644
--- a/docs/groovy-api/index_.asciidoc
+++ b/docs/groovy-api/index_.asciidoc
@@ -7,7 +7,7 @@ Java index API. The Groovy
extension to it is the ability to provide the indexed source using a
closure. For example:
-[source,js]
+[source,groovy]
--------------------------------------------------
def indexR = client.index {
index "test"
diff --git a/docs/groovy-api/search.asciidoc b/docs/groovy-api/search.asciidoc
index 946760d95c..d0b74a4d65 100644
--- a/docs/groovy-api/search.asciidoc
+++ b/docs/groovy-api/search.asciidoc
@@ -7,7 +7,7 @@ Java search API. The Groovy
extension allows to provide the search source to execute as a `Closure`
including the query itself (similar to GORM criteria builder):
-[source,js]
+[source,groovy]
--------------------------------------------------
def search = node.client.search {
indices "test"
@@ -19,7 +19,7 @@ def search = node.client.search {
}
}
-search.response.hits.each {SearchHit hit ->
+search.response.hits.each {SearchHit hit ->
println "Got hit $hit.id from $hit.index/$hit.type"
}
--------------------------------------------------
@@ -27,13 +27,13 @@ search.response.hits.each {SearchHit hit ->
It can also be executed using the "Java API" while still using a closure
for the query:
-[source,js]
+[source,groovy]
--------------------------------------------------
def search = node.client.prepareSearch("test").setQuery({
term(test: "value")
}).gexecute();
-search.response.hits.each {SearchHit hit ->
+search.response.hits.each {SearchHit hit ->
println "Got hit $hit.id from $hit.index/$hit.type"
}
--------------------------------------------------
@@ -48,7 +48,7 @@ The format of the search `Closure` follows the same JSON syntax as the
Term query where multiple values are provided (see
{ref}/query-dsl-terms-query.html[terms]):
-[source,js]
+[source,groovy]
--------------------------------------------------
def search = node.client.search {
indices "test"
@@ -64,7 +64,7 @@ def search = node.client.search {
Query string (see
{ref}/query-dsl-query-string-query.html[query string]):
-[source,js]
+[source,groovy]
--------------------------------------------------
def search = node.client.search {
indices "test"
@@ -82,7 +82,7 @@ def search = node.client.search {
Pagination (see
{ref}/search-request-from-size.html[from/size]):
-[source,js]
+[source,groovy]
--------------------------------------------------
def search = node.client.search {
indices "test"
@@ -99,7 +99,7 @@ def search = node.client.search {
Sorting (see {ref}/search-request-sort.html[sort]):
-[source,js]
+[source,groovy]
--------------------------------------------------
def search = node.client.search {
indices "test"
diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc
index d8e2d8848b..4c47639117 100644
--- a/docs/java-api/index.asciidoc
+++ b/docs/java-api/index.asciidoc
@@ -1,7 +1,7 @@
[[java-api]]
= Java API
:ref: http://www.elastic.co/guide/en/elasticsearch/reference/master
-:version: 5.0.0-alpha5
+:version: 6.0.0-alpha1
[preface]
== Preface
diff --git a/docs/java-api/query-dsl/geo-queries.asciidoc b/docs/java-api/query-dsl/geo-queries.asciidoc
index 18ed0f37c1..55184bde32 100644
--- a/docs/java-api/query-dsl/geo-queries.asciidoc
+++ b/docs/java-api/query-dsl/geo-queries.asciidoc
@@ -30,12 +30,6 @@ The queries in this group are:
Find documents with geo-points within the specified polygon.
-<<java-query-dsl-geohash-cell-query,`geohash_cell`>> query::
-
- Find geo-points whose geohash intersects with the geohash of the specified
- point.
-
-
include::geo-shape-query.asciidoc[]
include::geo-bounding-box-query.asciidoc[]
@@ -45,5 +39,3 @@ include::geo-distance-query.asciidoc[]
include::geo-distance-range-query.asciidoc[]
include::geo-polygon-query.asciidoc[]
-
-include::geohash-cell-query.asciidoc[]
diff --git a/docs/java-api/query-dsl/geohash-cell-query.asciidoc b/docs/java-api/query-dsl/geohash-cell-query.asciidoc
deleted file mode 100644
index 7aad54892a..0000000000
--- a/docs/java-api/query-dsl/geohash-cell-query.asciidoc
+++ /dev/null
@@ -1,17 +0,0 @@
-[[java-query-dsl-geohash-cell-query]]
-==== Geohash Cell Query
-
-See {ref}/query-dsl-geohash-cell-query.html[Geohash Cell Query]
-
-[source,java]
---------------------------------------------------
-QueryBuilder qb = geoHashCellQuery("pin.location", <1>
- new GeoPoint(13.4080, 52.5186)) <2>
- .neighbors(true) <3>
- .precision(3); <4>
---------------------------------------------------
-<1> field
-<2> point. Can also be a hash like `u30`
-<3> The `neighbors` option of the filter offers the possibility to filter cells
- next to the given cell.
-<4> precision level
diff --git a/docs/java-api/query-dsl/script-query.asciidoc b/docs/java-api/query-dsl/script-query.asciidoc
index 33786b693d..5d30cab418 100644
--- a/docs/java-api/query-dsl/script-query.asciidoc
+++ b/docs/java-api/query-dsl/script-query.asciidoc
@@ -14,7 +14,7 @@ QueryBuilder qb = scriptQuery(
If you have stored on each data node a script named `myscript.painless` with:
-[source,js]
+[source,painless]
--------------------------------------------------
doc['num1'].value > params.param1
--------------------------------------------------
@@ -35,5 +35,3 @@ QueryBuilder qb = scriptQuery(
<2> Script type: either `ScriptType.FILE`, `ScriptType.INLINE` or `ScriptType.INDEXED`
<3> Scripting engine
<4> Parameters as a `Map` of `<String, Object>`
-
-
diff --git a/docs/java-rest/index.asciidoc b/docs/java-rest/index.asciidoc
index 2c2435d271..1247ae1996 100644
--- a/docs/java-rest/index.asciidoc
+++ b/docs/java-rest/index.asciidoc
@@ -1,7 +1,7 @@
[[java-rest]]
= Java REST Client
-:version: 5.0.0-alpha5
+:version: 6.0.0-alpha1
include::overview.asciidoc[]
diff --git a/docs/java-rest/usage.asciidoc b/docs/java-rest/usage.asciidoc
index 69f95413a7..55eac8e466 100644
--- a/docs/java-rest/usage.asciidoc
+++ b/docs/java-rest/usage.asciidoc
@@ -90,16 +90,67 @@ http://hc.apache.org/httpcomponents-asyncclient-dev/httpasyncclient/apidocs/org/
=== Performing requests
Once the `RestClient` has been created, requests can be sent by calling one of
-the available `performRequest` method variants. The ones that return the
-`Response` are executed synchronously, meaning that the client will block and
-wait for a response to be returned. The `performRequest` variants that return
-`void` accept a `ResponseListener` as an argument and are executed
-asynchronously. The provided listener will be notified upon completion or
-failure. The following are the arguments accepted by the different
-`performRequest` methods:
+the available `performRequest` or `performRequestAsync` method variants.
+The `performRequest` methods are synchronous and they return the `Response`
+directly, meaning that the client will block and wait for a response to be returned.
+The `performRequestAsync` variants, which return `void` and accept an extra
+`ResponseListener` as an argument, are executed asynchronously. The provided
+listener will be notified upon completion or failure.
+
+[source,java]
+--------------------------------------------------
+// Synchronous variants
+Response performRequest(String method, String endpoint,
+ Header... headers)
+ throws IOException;
+
+Response performRequest(String method, String endpoint,
+ Map<String, String> params, Header... headers)
+ throws IOException;
+
+Response performRequest(String method, String endpoint,
+ Map<String, String> params,
+ HttpEntity entity,
+ Header... headers)
+ throws IOException;
+
+Response performRequest(String method, String endpoint,
+ Map<String, String> params,
+ HttpEntity entity,
+ HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
+ Header... headers)
+ throws IOException;
+
+// Asynchronous variants
+void performRequestAsync(String method, String endpoint,
+ ResponseListener responseListener,
+ Header... headers);
+
+void performRequestAsync(String method, String endpoint,
+ Map<String, String> params,
+ ResponseListener responseListener,
+ Header... headers);
+
+void performRequestAsync(String method, String endpoint,
+ Map<String, String> params,
+ HttpEntity entity,
+ ResponseListener responseListener,
+ Header... headers);
+
+void performRequestAsync(String method, String endpoint,
+ Map<String, String> params,
+ HttpEntity entity,
+ ResponseListener responseListener,
+ HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
+ Header... headers);
+--------------------------------------------------
+
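+For example, a minimal sketch of calling one synchronous variant and its
+asynchronous counterpart might look like this (the endpoint and parameters
+are illustrative):
+
+[source,java]
+--------------------------------------------------
+// Synchronous: blocks until the Response is available
+Response response = restClient.performRequest("GET", "/_cluster/health",
+        Collections.singletonMap("pretty", "true"));
+
+// Asynchronous: returns immediately, the listener is notified later
+restClient.performRequestAsync("GET", "/_cluster/health",
+        Collections.<String, String>emptyMap(),
+        new ResponseListener() {
+            @Override
+            public void onSuccess(Response response) {
+                // handle the Response here
+            }
+
+            @Override
+            public void onFailure(Exception exception) {
+                // handle the failure here
+            }
+        });
+--------------------------------------------------
+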
+==== Request Arguments
+
+The following are the arguments accepted by the different methods:
`method`:: the http method or verb
-`endpoint`:: the request path, which identifies the Elasticsearch api to
+`endpoint`:: the request path, which identifies the Elasticsearch API to
call (e.g. `/_cluster/health`)
`params`:: the optional parameters to be sent as querystring parameters
`entity`:: the optional request body enclosed in an
@@ -109,14 +160,14 @@ http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http
callback. Controls how the response body gets streamed from a non-blocking
HTTP connection on the client side. When not provided, the default
implementation is used which buffers the whole response body in heap memory
-`responseListener`:: the listener to be notified upon request success or failure
-whenever the async `performRequest` method variants are used
+`responseListener`:: the listener to be notified upon asynchronous
+request success or failure
`headers`:: optional request headers
=== Reading responses
-The `Response` object, either returned by the sync `performRequest` methods or
- received as an argument in `ResponseListener#onSucces(Response)`, wraps the
+The `Response` object, either returned by the synchronous `performRequest` methods or
+received as an argument in `ResponseListener#onSuccess(Response)`, wraps the
response object returned by the http client and exposes the following information:
`getRequestLine`:: information about the performed request
@@ -129,14 +180,19 @@ https://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/org/apache/http/Ht
object
When performing a request, an exception is thrown (or received as an argument
- in `ResponseListener#onSucces(Exception)` in the following scenarios:
+ in `ResponseListener#onFailure(Exception)`) in the following scenarios:
`IOException`:: communication problem (e.g. SocketTimeoutException etc.)
`ResponseException`:: a response was returned, but its status code indicated
-an error (either `4xx` or `5xx`). A `ResponseException` originates from a valid
+an error (not `2xx`). A `ResponseException` originates from a valid
http response, hence it exposes its corresponding `Response` object which gives
access to the returned response.
+NOTE: A `ResponseException` is **not** thrown for `HEAD` requests that return
+a `404` status code because it is an expected `HEAD` response that simply
+denotes that the resource is not found. All other HTTP methods (e.g., `GET`)
+throw a `ResponseException` for `404` responses.
+
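+For example, here is a minimal sketch of reading a `Response` and handling the
+failure scenarios described above (the endpoint is illustrative, and
+`EntityUtils` is the `org.apache.http.util.EntityUtils` helper from the Apache
+http client):
+
+[source,java]
+--------------------------------------------------
+try {
+    Response response = restClient.performRequest("GET", "/_cluster/health");
+    int statusCode = response.getStatusLine().getStatusCode();
+    String responseBody = EntityUtils.toString(response.getEntity());
+} catch (ResponseException e) {
+    // the response status code was not 2xx: the Response is still accessible
+    int errorStatus = e.getResponse().getStatusLine().getStatusCode();
+} catch (IOException e) {
+    // communication problem, no response was received
+}
+--------------------------------------------------
+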
=== Example requests
@@ -167,6 +223,7 @@ Response indexResponse = restClient.performRequest(
Note that the low-level client doesn't expose any helper for json marshalling
and un-marshalling. Users are free to use the library that they prefer for that
purpose.
+
The underlying Apache Async Http Client ships with different
https://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/org/apache/http/HttpEntity.html[`org.apache.http.HttpEntity`]
implementations that allow you to provide the request body in different formats
@@ -184,7 +241,7 @@ The following is a basic example of how async requests can be sent:
int numRequests = 10;
final CountDownLatch latch = new CountDownLatch(numRequests);
for (int i = 0; i < numRequests; i++) {
- restClient.performRequest(
+ restClient.performRequestAsync(
"PUT",
"/twitter/tweet/" + i,
Collections.<String, String>emptyMap(),
diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc
index b43a10e25b..1677634bb5 100644
--- a/docs/plugins/analysis-icu.asciidoc
+++ b/docs/plugins/analysis-icu.asciidoc
@@ -17,11 +17,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install analysis-icu
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/analysis-icu/{version}/analysis-icu-{version}.zip.
+
[[analysis-icu-remove]]
[float]
==== Removal
@@ -32,7 +34,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove analysis-icu
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc
index cba44a4d64..6b3dc0a72f 100644
--- a/docs/plugins/analysis-kuromoji.asciidoc
+++ b/docs/plugins/analysis-kuromoji.asciidoc
@@ -14,11 +14,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install analysis-kuromoji
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/analysis-kuromoji/{version}/analysis-kuromoji-{version}.zip.
+
[[analysis-kuromoji-remove]]
[float]
==== Removal
@@ -29,7 +31,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove analysis-kuromoji
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -474,10 +475,11 @@ The above request returns:
"start_offset" : 5,
"end_offset" : 8,
"type" : "word",
- "position" : 3
+ "position" : 2
} ]
}
--------------------------------------------------
+// TESTRESPONSE
[[analysis-kuromoji-number]]
===== `kuromoji_number` token filter
diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc
index 4fcfcf6cab..0544900a8c 100644
--- a/docs/plugins/analysis-phonetic.asciidoc
+++ b/docs/plugins/analysis-phonetic.asciidoc
@@ -15,11 +15,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install analysis-phonetic
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/analysis-phonetic/{version}/analysis-phonetic-{version}.zip.
+
[[analysis-phonetic-remove]]
[float]
==== Removal
@@ -30,7 +32,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove analysis-phonetic
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/analysis-smartcn.asciidoc b/docs/plugins/analysis-smartcn.asciidoc
index 665ccbaf61..5d1c13ef04 100644
--- a/docs/plugins/analysis-smartcn.asciidoc
+++ b/docs/plugins/analysis-smartcn.asciidoc
@@ -20,11 +20,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install analysis-smartcn
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/analysis-smartcn/{version}/analysis-smartcn-{version}.zip.
+
[[analysis-smartcn-remove]]
[float]
==== Removal
@@ -35,7 +37,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove analysis-smartcn
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/analysis-stempel.asciidoc b/docs/plugins/analysis-stempel.asciidoc
index 8a42135a87..d2502521b0 100644
--- a/docs/plugins/analysis-stempel.asciidoc
+++ b/docs/plugins/analysis-stempel.asciidoc
@@ -17,11 +17,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install analysis-stempel
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/analysis-stempel/{version}/analysis-stempel-{version}.zip.
+
[[analysis-stempel-remove]]
[float]
==== Removal
@@ -32,7 +34,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove analysis-stempel
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc
index a9ec9929b4..342d05d6fa 100644
--- a/docs/plugins/discovery-azure-classic.asciidoc
+++ b/docs/plugins/discovery-azure-classic.asciidoc
@@ -17,11 +17,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install discovery-azure-classic
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/discovery-azure-classic/{version}/discovery-azure-classic-{version}.zip.
+
[[discovery-azure-classic-remove]]
[float]
==== Removal
@@ -32,7 +34,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove discovery-azure-classic
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -359,7 +360,7 @@ ssh azure-elasticsearch-cluster.cloudapp.net
Once connected, install Elasticsearch:
-[source,sh]
+["source","sh",subs="attributes,callouts"]
----
# Install Latest Java version
# Read http://www.webupd8.org/2012/09/install-oracle-java-8-in-ubuntu-via-ppa.html for details
@@ -372,36 +373,46 @@ sudo apt-get install oracle-java8-installer
# sudo apt-get install openjdk-8-jre-headless
# Download Elasticsearch
-curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-2.0.0.deb -o elasticsearch-2.0.0.deb
+curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{version}.deb -o elasticsearch-{version}.deb
# Prepare Elasticsearch installation
-sudo dpkg -i elasticsearch-2.0.0.deb
+sudo dpkg -i elasticsearch-{version}.deb
----
+// NOTCONSOLE
Check that Elasticsearch is running:
-[source,sh]
+[source,js]
----
-curl http://localhost:9200/
+GET /
----
+// CONSOLE
This command should give you a JSON result:
-[source,js]
-----
+["source","js",subs="attributes,callouts"]
+--------------------------------------------
{
- "status" : 200,
- "name" : "Living Colossus",
+ "name" : "Cp8oag6",
+ "cluster_name" : "elasticsearch",
+ "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",
"version" : {
- "number" : "2.0.0",
- "build_hash" : "a46900e9c72c0a623d71b54016357d5f94c8ea32",
- "build_timestamp" : "2014-02-12T16:18:34Z",
+ "number" : "{version}",
+ "build_hash" : "f27399d",
+ "build_date" : "2016-03-30T09:51:41.449Z",
"build_snapshot" : false,
- "lucene_version" : "5.1"
+ "lucene_version" : "{lucene_version}"
},
"tagline" : "You Know, for Search"
}
-----
+--------------------------------------------
+// TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/]
+// TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/]
+// TESTRESPONSE[s/"cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",/"cluster_uuid" : "$body.cluster_uuid",/]
+// TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/]
+// TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/]
+// TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/]
+// So much s/// but at least we test that the layout is close to matching....
[[discovery-azure-classic-long-plugin]]
===== Install elasticsearch cloud azure plugin
diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc
index 8c11942fff..8520cf1d16 100644
--- a/docs/plugins/discovery-ec2.asciidoc
+++ b/docs/plugins/discovery-ec2.asciidoc
@@ -3,6 +3,8 @@
The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] for unicast discovery.
+*If you are looking for a hosted solution of Elasticsearch on AWS, please visit http://www.elastic.co/cloud.*
+
[[discovery-ec2-install]]
[float]
==== Installation
@@ -13,11 +15,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install discovery-ec2
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/discovery-ec2/{version}/discovery-ec2-{version}.zip.
+
[[discovery-ec2-remove]]
[float]
==== Removal
@@ -28,7 +32,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove discovery-ec2
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -229,6 +232,7 @@ Management Console. It should look similar to this.
"Version": "2012-10-17"
}
----
+// NOTCONSOLE
[[discovery-ec2-filtering]]
===== Filtering by Tags
diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc
new file mode 100644
index 0000000000..a848cdd6ff
--- /dev/null
+++ b/docs/plugins/discovery-file.asciidoc
@@ -0,0 +1,93 @@
+[[discovery-file]]
+=== File-Based Discovery Plugin
+
+The file-based discovery plugin uses a list of hosts/ports in a `unicast_hosts.txt` file
+in the `config/discovery-file` directory for unicast discovery.
+
+[[discovery-file-install]]
+[float]
+==== Installation
+
+This plugin can be installed using the plugin manager:
+
+[source,sh]
+----------------------------------------------------------------
+sudo bin/elasticsearch-plugin install discovery-file
+----------------------------------------------------------------
+
+The plugin must be installed on every node in the cluster, and each node must
+be restarted after installation. Note that installing the plugin will add a
+`discovery-file` directory to the `config` folder, and a default `unicast_hosts.txt`
+file that must be edited with the correct unicast hosts list before starting the node.
+
+[[discovery-file-remove]]
+[float]
+==== Removal
+
+The plugin can be removed with the following command:
+
+[source,sh]
+----------------------------------------------------------------
+sudo bin/elasticsearch-plugin remove discovery-file
+----------------------------------------------------------------
+
+The node must be stopped before removing the plugin.
+
+[[discovery-file-usage]]
+[float]
+==== Using the file-based discovery plugin
+
+The file-based discovery plugin provides the ability to specify the
+unicast hosts list through a simple `unicast_hosts.txt` file that can
+be dynamically updated at any time. The discovery type for this plugin
+is still the default `zen` discovery, so no changes are required to the
+`elasticsearch.yml` config file. This plugin simply provides a facility
+to supply the unicast hosts list for zen discovery through an external
+file that can be updated at any time by a side process.
+
+For example, this provides a convenient mechanism for an Elasticsearch instance
+running in Docker containers to be dynamically supplied with a list of IP
+addresses to connect to for zen discovery when those IP addresses may not be
+known at node startup.
+
+Note that the file-based discovery plugin is meant to augment the unicast
+hosts list in `elasticsearch.yml` (if specified), not replace it. Therefore,
+if there are valid unicast host entries in `discovery.zen.ping.unicast.hosts`,
+they will be used in addition to those supplied in `unicast_hosts.txt`.
+
+Anytime a change is made to the `unicast_hosts.txt` file, even as Elasticsearch
+continues to run, the new changes will be picked up by the plugin and the
+new hosts list will be used for the next pinging round for master election.
+
+Upon installation of the plugin, a default `unicast_hosts.txt` file will
+be found in the `$CONFIG_DIR/discovery-file` directory. This default file
+will contain some comments about what the file should contain. All comments
+for this file must appear on their own lines, starting with `#` (i.e. comments
+cannot start in the middle of a line).
+
+[[discovery-file-format]]
+[float]
+==== unicast_hosts.txt file format
+
+The format of the file is one unicast host entry per line.
+Each unicast host entry consists of the host (host name or IP address) and
+an optional transport port number. If the port number is specified, it must
+come immediately after the host (on the same line) separated by a `:`.
+If the port number is not specified, a default value of 9300 is used.
+
+For example, here is a sample `unicast_hosts.txt` for a cluster with
+four nodes that participate in unicast discovery, some of which are not
+running on the default port:
+
+[source,txt]
+----------------------------------------------------------------
+10.10.10.5
+10.10.10.6:9305
+10.10.10.5:10005
+# an IPv6 address
+[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301
+----------------------------------------------------------------
+
+Host names are allowed instead of IP addresses (similar to
+`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be
+specified in brackets with the port coming after the brackets.
diff --git a/docs/plugins/discovery-gce.asciidoc b/docs/plugins/discovery-gce.asciidoc
index aa458d28af..fee10a96cb 100644
--- a/docs/plugins/discovery-gce.asciidoc
+++ b/docs/plugins/discovery-gce.asciidoc
@@ -13,11 +13,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install discovery-gce
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/discovery-gce/{version}/discovery-gce-{version}.zip.
+
[[discovery-gce-remove]]
[float]
==== Removal
@@ -28,7 +30,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove discovery-gce
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -271,11 +272,11 @@ If anything goes wrong, you should check logs:
tail -f /var/log/elasticsearch/elasticsearch.log
--------------------------------------------------
-If needed, you can change log level to `TRACE` by opening `logging.yml`:
+If needed, you can change log level to `trace` by opening `log4j2.properties`:
[source,sh]
--------------------------------------------------
-sudo vi /etc/elasticsearch/logging.yml
+sudo vi /etc/elasticsearch/log4j2.properties
--------------------------------------------------
and adding the following line:
@@ -283,7 +284,8 @@ and adding the following line:
[source,yaml]
--------------------------------------------------
# discovery
-discovery.gce: TRACE
+logger.discovery_gce.name = discovery.gce
+logger.discovery_gce.level = trace
--------------------------------------------------
diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc
index 999bf9c0e1..96a1c1e7b2 100644
--- a/docs/plugins/discovery.asciidoc
+++ b/docs/plugins/discovery.asciidoc
@@ -21,6 +21,10 @@ The Azure Classic discovery plugin uses the Azure Classic API for unicast discov
The Google Compute Engine discovery plugin uses the GCE API for unicast discovery.
+<<discovery-file,File-based discovery>>::
+
+The File-based discovery plugin allows providing the unicast hosts list through a dynamically updatable file.
+
[float]
==== Community contributed discovery plugins
@@ -37,3 +41,5 @@ include::discovery-azure-classic.asciidoc[]
include::discovery-gce.asciidoc[]
+include::discovery-file.asciidoc[]
+
diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc
index 725c200ee9..f969dc4918 100644
--- a/docs/plugins/index.asciidoc
+++ b/docs/plugins/index.asciidoc
@@ -1,7 +1,10 @@
= Elasticsearch Plugins and Integrations
-:ref: https://www.elastic.co/guide/en/elasticsearch/reference/master
-:guide: https://www.elastic.co/guide
+:ref: https://www.elastic.co/guide/en/elasticsearch/reference/master
+:guide: https://www.elastic.co/guide
+:version: 6.0.0-alpha1
+:lucene_version: 6.2.0
+:plugin_url: https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin
[[intro]]
== Introduction to plugins
@@ -11,14 +14,32 @@ manner. They range from adding custom mapping types, custom analyzers, native
scripts, custom discovery and more.
Plugins contain JAR files, but may also contain scripts and config files, and
-must be installed on every node in the cluster. After installation, each
+must be installed on every node in the cluster. After installation, each
node must be restarted before the plugin becomes visible.
-IMPORTANT: Site plugins -- plugins containing HTML, CSS and Javascript -- are
-no longer supported.
+NOTE: A full cluster restart is required for installing plugins that have
+custom cluster state metadata, such as X-Pack. It is still possible to upgrade
+such plugins with a rolling restart.
+
+This documentation distinguishes two categories of plugins:
+
+Core Plugins:: This category identifies plugins that are part of the Elasticsearch
+project. Delivered at the same time as Elasticsearch, their version number always
+matches the version number of Elasticsearch itself. These plugins are maintained
+by the Elastic team with the appreciated help of amazing community members (for
+open source plugins). Issues and bug reports can be reported on the
+https://github.com/elastic/elasticsearch[GitHub project page].
+
+Community contributed:: This category identifies plugins that are external to
+the Elasticsearch project. They are provided by individual developers or private
+companies and have their own licenses as well as their own versioning system.
+Issues and bug reports can usually be reported on the community plugin's web site.
For advice on writing your own plugin, see <<plugin-authors>>.
+IMPORTANT: Site plugins -- plugins containing HTML, CSS and Javascript -- are
+no longer supported.
+
include::plugin-script.asciidoc[]
include::api.asciidoc[]
@@ -48,5 +69,3 @@ include::integrations.asciidoc[]
include::authors.asciidoc[]
include::redirects.asciidoc[]
-
-
diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc
index 3c72e51409..dc9c53412f 100644
--- a/docs/plugins/ingest-attachment.asciidoc
+++ b/docs/plugins/ingest-attachment.asciidoc
@@ -21,11 +21,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install ingest-attachment
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/ingest-attachment/{version}/ingest-attachment-{version}.zip.
+
[[ingest-attachment-remove]]
[float]
==== Removal
@@ -36,7 +38,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove ingest-attachment
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/ingest-geoip.asciidoc b/docs/plugins/ingest-geoip.asciidoc
index d6eced47ec..9b8e6824fb 100644
--- a/docs/plugins/ingest-geoip.asciidoc
+++ b/docs/plugins/ingest-geoip.asciidoc
@@ -2,7 +2,8 @@
=== Ingest Geoip Processor Plugin
The GeoIP processor adds information about the geographical location of IP addresses, based on data from the Maxmind databases.
-This processor adds this information by default under the `geoip` field.
+This processor adds this information by default under the `geoip` field. The `geoip` processor can resolve both IPv4 and
+IPv6 addresses.
The ingest-geoip plugin ships by default with the GeoLite2 City and GeoLite2 Country geoip2 databases from Maxmind made available
under the CCA-ShareAlike 3.0 license. For more details, see http://dev.maxmind.com/geoip/geoip2/geolite2/
@@ -21,11 +22,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install ingest-geoip
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/ingest-geoip/{version}/ingest-geoip-{version}.zip.
+
[[ingest-geoip-remove]]
[float]
==== Removal
@@ -36,7 +39,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove ingest-geoip
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -156,3 +158,48 @@ returns this:
}
--------------------------------------------------
// TESTRESPONSE
+
+
+Not all IP addresses have geo information in the database. When this
+occurs, no `target_field` is inserted into the document.
+
+Here is an example of how a document is indexed when no geo information
+can be found for "93.114.45.13":
+
+[source,js]
+--------------------------------------------------
+PUT _ingest/pipeline/geoip
+{
+ "description" : "Add geoip info",
+ "processors" : [
+ {
+ "geoip" : {
+ "field" : "ip"
+ }
+ }
+ ]
+}
+PUT my_index/my_type/my_id?pipeline=geoip
+{
+ "ip": "93.114.45.13"
+}
+GET my_index/my_type/my_id
+--------------------------------------------------
+// CONSOLE
+
+Which returns:
+
+[source,js]
+--------------------------------------------------
+{
+ "found": true,
+ "_index": "my_index",
+ "_type": "my_type",
+ "_id": "my_id",
+ "_version": 1,
+ "_source": {
+ "ip": "93.114.45.13"
+ }
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/plugins/ingest-user-agent.asciidoc b/docs/plugins/ingest-user-agent.asciidoc
index 95997a34c1..fc4d35ebfe 100644
--- a/docs/plugins/ingest-user-agent.asciidoc
+++ b/docs/plugins/ingest-user-agent.asciidoc
@@ -16,11 +16,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install ingest-user-agent
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/ingest-user-agent/{version}/ingest-user-agent-{version}.zip.
+
[[ingest-user-agent-remove]]
[float]
==== Removal
@@ -31,7 +33,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove ingest-user-agent
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/lang-javascript.asciidoc b/docs/plugins/lang-javascript.asciidoc
index 422c23ed0b..0670b0adad 100644
--- a/docs/plugins/lang-javascript.asciidoc
+++ b/docs/plugins/lang-javascript.asciidoc
@@ -1,6 +1,8 @@
[[lang-javascript]]
=== JavaScript Language Plugin
+deprecated[5.0.0,JavaScript will be replaced by the new scripting language {ref}/modules-scripting-painless.html[`Painless`]]
+
The JavaScript language plugin enables the use of JavaScript in Elasticsearch
scripts, via Mozilla's
https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Rhino[Rhino JavaScript] engine.
@@ -15,11 +17,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install lang-javascript
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/lang-javascript/{version}/lang-javascript-{version}.zip.
+
[[lang-javascript-remove]]
[float]
==== Removal
@@ -30,7 +34,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove lang-javascript
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -145,7 +148,7 @@ JavaScript:
First, save this file as `config/scripts/my_script.js` on every node
in the cluster:
-[source,js]
+[source,painless]
----
doc["num"].value * factor
----
diff --git a/docs/plugins/lang-python.asciidoc b/docs/plugins/lang-python.asciidoc
index 96d00d9e2c..55de552537 100644
--- a/docs/plugins/lang-python.asciidoc
+++ b/docs/plugins/lang-python.asciidoc
@@ -1,6 +1,8 @@
[[lang-python]]
=== Python Language Plugin
+deprecated[5.0.0,Python will be replaced by the new scripting language {ref}/modules-scripting-painless.html[`Painless`]]
+
The Python language plugin enables the use of Python in Elasticsearch
scripts, via the http://www.jython.org/[Jython] Java implementation of Python.
@@ -14,11 +16,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install lang-python
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/lang-python/{version}/lang-python-{version}.zip.
+
[[lang-python-remove]]
[float]
==== Removal
@@ -29,7 +33,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove lang-python
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/mapper-attachments.asciidoc b/docs/plugins/mapper-attachments.asciidoc
deleted file mode 100644
index b5ceef39ae..0000000000
--- a/docs/plugins/mapper-attachments.asciidoc
+++ /dev/null
@@ -1,418 +0,0 @@
-[[mapper-attachments]]
-=== Mapper Attachments Plugin
-
-deprecated[5.0.0,The `mapper-attachments` plugin has been replaced by the <<ingest-attachment, `ingest-attachment`>> plugin]
-
-The mapper attachments plugin lets Elasticsearch index file attachments in common formats (such as PPT, XLS, PDF)
-using the Apache text extraction library http://lucene.apache.org/tika/[Tika].
-
-In practice, the plugin adds the `attachment` type when mapping properties so that documents can be populated with
-file attachment contents (encoded as `base64`).
-
-[[mapper-attachments-install]]
-[float]
-==== Installation
-
-This plugin can be installed using the plugin manager:
-
-[source,sh]
-----------------------------------------------------------------
-sudo bin/elasticsearch-plugin install mapper-attachments
-----------------------------------------------------------------
-// NOTCONSOLE
-
-The plugin must be installed on every node in the cluster, and each node must
-be restarted after installation.
-
-[[mapper-attachments-remove]]
-[float]
-==== Removal
-
-The plugin can be removed with the following command:
-
-[source,sh]
-----------------------------------------------------------------
-sudo bin/elasticsearch-plugin remove mapper-attachments
-----------------------------------------------------------------
-// NOTCONSOLE
-
-The node must be stopped before removing the plugin.
-
-[[mapper-attachments-helloworld]]
-==== Hello, world
-
-Create a property mapping using the new type `attachment`:
-
-[source,js]
---------------------------
-PUT /trying-out-mapper-attachments
-{
- "mappings": {
- "person": {
- "properties": {
- "cv": { "type": "attachment" }
-}}}}
---------------------------
-// CONSOLE
-
-Index a new document populated with a `base64`-encoded attachment:
-
-[source,js]
---------------------------
-POST /trying-out-mapper-attachments/person/1?refresh
-{
- "cv": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0="
-}
---------------------------
-// CONSOLE
-// TEST[continued]
-
-Search for the document using words in the attachment:
-
-[source,js]
---------------------------
-POST /trying-out-mapper-attachments/person/_search
-{
- "query": {
- "query_string": {
- "query": "ipsum"
-}}}
---------------------------
-// CONSOLE
-// TEST[continued]
-
-If you get a hit for your indexed document, the plugin should be installed and working. It'll look like:
-
-[source,js]
---------------------------
-{
- "timed_out": false,
- "took": 53,
- "hits": {
- "total": 1,
- "max_score": 0.25811607,
- "hits": [
- {
- "_score": 0.25811607,
- "_index": "trying-out-mapper-attachments",
- "_type": "person",
- "_id": "1",
- "_source": {
- "cv": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0="
- }
- }
- ]
- },
- "_shards": ...
-}
---------------------------
-// TESTRESPONSE[s/"took": 53/"took": "$body.took"/]
-// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards"/]
-
-[[mapper-attachments-usage]]
-==== Usage
-
-Using the attachment type is simple, in your mapping JSON, simply set a certain JSON element as attachment, for example:
-
-[source,js]
---------------------------
-PUT /test
-{
- "mappings": {
- "person" : {
- "properties" : {
- "my_attachment" : { "type" : "attachment" }
- }
- }
- }
-}
---------------------------
-// CONSOLE
-
-In this case, the JSON to index can be:
-
-[source,js]
---------------------------
-PUT /test/person/1
-{
- "my_attachment" : "... base64 encoded attachment ..."
-}
---------------------------
-// CONSOLE
-
-Or it is possible to use more elaborated JSON if content type, resource name or language need to be set explicitly:
-
-[source,js]
---------------------------
-PUT /test/person/1
-{
- "my_attachment" : {
- "_content_type" : "application/pdf",
- "_name" : "resource/name/of/my.pdf",
- "_language" : "en",
- "_content" : "... base64 encoded attachment ..."
- }
-}
---------------------------
-// CONSOLE
-
-The `attachment` type not only indexes the content of the doc in `content` sub field, but also automatically adds meta
-data on the attachment as well (when available).
-
-The metadata supported are:
-
-* `date`
-* `title`
-* `name` only available if you set `_name` see above
-* `author`
-* `keywords`
-* `content_type`
-* `content_length` is the original content_length before text extraction (aka file size)
-* `language`
-
-They can be queried using the "dot notation", for example: `my_attachment.author`.
-
-Both the meta data and the actual content are simple core type mappers (text, date, …), thus, they can be controlled
-in the mappings. For example:
-
-[source,js]
---------------------------
-PUT /test
-{
- "settings": {
- "index": {
- "analysis": {
- "analyzer": {
- "my_analyzer": {
- "type": "custom",
- "tokenizer": "standard",
- "filter": ["standard"]
- }
- }
- }
- }
- },
- "mappings": {
- "person" : {
- "properties" : {
- "file" : {
- "type" : "attachment",
- "fields" : {
- "content" : {"index" : true},
- "title" : {"store" : true},
- "date" : {"store" : true},
- "author" : {"analyzer" : "my_analyzer"},
- "keywords" : {"store" : true},
- "content_type" : {"store" : true},
- "content_length" : {"store" : true},
- "language" : {"store" : true}
- }
- }
- }
- }
- }
-}
---------------------------
-// CONSOLE
-
-In the above example, the actual content indexed is mapped under `fields` name `content`, and we decide not to index it, so
-it will only be available in the `_all` field. The other fields map to their respective metadata names, but there is no
-need to specify the `type` (like `text` or `date`) since it is already known.
-
-==== Querying or accessing metadata
-
-If you need to query on metadata fields, use the attachment field name dot the metadata field. For example:
-
-[source,js]
---------------------------
-PUT /test
-PUT /test/person/_mapping
-{
- "person": {
- "properties": {
- "file": {
- "type": "attachment",
- "fields": {
- "content_type": {
- "type": "text",
- "store": true
- }
- }
- }
- }
- }
-}
-PUT /test/person/1?refresh=true
-{
- "file": "IkdvZCBTYXZlIHRoZSBRdWVlbiIgKGFsdGVybmF0aXZlbHkgIkdvZCBTYXZlIHRoZSBLaW5nIg=="
-}
-GET /test/person/_search
-{
- "stored_fields": [ "file.content_type" ],
- "query": {
- "match": {
- "file.content_type": "text plain"
- }
- }
-}
---------------------------
-// CONSOLE
-
-Will give you:
-
-[source,js]
---------------------------
-{
- "took": 2,
- "timed_out": false,
- "_shards": {
- "total": 5,
- "successful": 5,
- "failed": 0
- },
- "hits": {
- "total": 1,
- "max_score": 0.16273327,
- "hits": [
- {
- "_index": "test",
- "_type": "person",
- "_id": "1",
- "_score": 0.16273327,
- "fields": {
- "file.content_type": [
- "text/plain; charset=ISO-8859-1"
- ]
- }
- }
- ]
- }
-}
---------------------------
-
-[[mapper-attachments-indexed-characters]]
-==== Indexed Characters
-
-By default, `100000` characters are extracted when indexing the content. This default value can be changed by setting
-the `index.mapping.attachment.indexed_chars` setting. It can also be provided on a per document indexed using the
-`_indexed_chars` parameter. `-1` can be set to extract all text, but note that all the text needs to be allowed to be
-represented in memory:
-
-[source,js]
---------------------------
-PUT /test/person/1
-{
- "my_attachment" : {
- "_indexed_chars" : -1,
- "_content" : "... base64 encoded attachment ..."
- }
-}
---------------------------
-// CONSOLE
-
-[[mapper-attachments-error-handling]]
-==== Metadata parsing error handling
-
-While extracting metadata content, errors could happen for example when parsing dates.
-Parsing errors are ignored so your document is indexed.
-
-You can disable this feature by setting the `index.mapping.attachment.ignore_errors` setting to `false`.
-
-[[mapper-attachments-language-detection]]
-==== Language Detection
-
-By default, language detection is disabled (`false`) as it could come with a cost.
-This default value can be changed by setting the `index.mapping.attachment.detect_language` setting.
-It can also be provided on a per document indexed using the `_detect_language` parameter.
-
-Note that you can force language using `_language` field when sending your actual document:
-
-[source,js]
---------------------------
-{
- "my_attachment" : {
- "_language" : "en",
- "_content" : "... base64 encoded attachment ..."
- }
-}
---------------------------
-
-[[mapper-attachments-highlighting]]
-==== Highlighting attachments
-
-If you want to highlight your attachment content, you will need to set `"store": true` and
-`"term_vector":"with_positions_offsets"` for your attachment field. Here is a full script which does it:
-
-[source,js]
---------------------------
-PUT /test
-PUT /test/person/_mapping
-{
- "person": {
- "properties": {
- "file": {
- "type": "attachment",
- "fields": {
- "content": {
- "type": "text",
- "term_vector":"with_positions_offsets",
- "store": true
- }
- }
- }
- }
- }
-}
-PUT /test/person/1?refresh=true
-{
- "file": "IkdvZCBTYXZlIHRoZSBRdWVlbiIgKGFsdGVybmF0aXZlbHkgIkdvZCBTYXZlIHRoZSBLaW5nIg=="
-}
-GET /test/person/_search
-{
- "stored_fields": [],
- "query": {
- "match": {
- "file.content": "king queen"
- }
- },
- "highlight": {
- "fields": {
- "file.content": {
- }
- }
- }
-}
---------------------------
-// CONSOLE
-
-It gives back:
-
-[source,js]
---------------------------
-{
- "took": 9,
- "timed_out": false,
- "_shards": {
- "total": 1,
- "successful": 1,
- "failed": 0
- },
- "hits": {
- "total": 1,
- "max_score": 0.13561106,
- "hits": [
- {
- "_index": "test",
- "_type": "person",
- "_id": "1",
- "_score": 0.13561106,
- "highlight": {
- "file.content": [
- "\"God Save the <em>Queen</em>\" (alternatively \"God Save the <em>King</em>\"\n"
- ]
- }
- }
- ]
- }
-}
---------------------------
diff --git a/docs/plugins/mapper-murmur3.asciidoc b/docs/plugins/mapper-murmur3.asciidoc
index f81c226d1f..fd9f29ceb2 100644
--- a/docs/plugins/mapper-murmur3.asciidoc
+++ b/docs/plugins/mapper-murmur3.asciidoc
@@ -15,11 +15,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install mapper-murmur3
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/mapper-murmur3/{version}/mapper-murmur3-{version}.zip.
+
[[mapper-murmur3-remove]]
[float]
==== Removal
@@ -30,7 +32,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove mapper-murmur3
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/mapper-size.asciidoc b/docs/plugins/mapper-size.asciidoc
index 4b2d02a6a2..76fdf71a67 100644
--- a/docs/plugins/mapper-size.asciidoc
+++ b/docs/plugins/mapper-size.asciidoc
@@ -15,11 +15,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install mapper-size
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/mapper-size/{version}/mapper-size-{version}.zip.
+
[[mapper-size-remove]]
[float]
==== Removal
@@ -30,7 +32,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove mapper-size
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/mapper.asciidoc b/docs/plugins/mapper.asciidoc
index 50a21bcb34..226fc4e40d 100644
--- a/docs/plugins/mapper.asciidoc
+++ b/docs/plugins/mapper.asciidoc
@@ -8,12 +8,6 @@ Mapper plugins allow new field datatypes to be added to Elasticsearch.
The core mapper plugins are:
-<<mapper-attachments>>::
-
-deprecated[5.0.0,The `mapper-attachments` plugin has been replaced by the <<ingest-attachment, `ingest-attachment`>> plugin]:
-The mapper-attachments integrates http://lucene.apache.org/tika/[Apache Tika] to provide a new field type `attachment`
-to allow indexing of documents such as PDFs and Microsoft Word.
-
<<mapper-size>>::
The mapper-size plugin provides the `_size` meta field which, when enabled,
@@ -25,6 +19,5 @@ indexes the size in bytes of the original
The mapper-murmur3 plugin allows hashes to be computed at index-time and stored
in the index for later use with the `cardinality` aggregation.
-include::mapper-attachments.asciidoc[]
include::mapper-size.asciidoc[]
include::mapper-murmur3.asciidoc[]
diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc
index f8fb881412..e8235d7582 100644
--- a/docs/plugins/plugin-script.asciidoc
+++ b/docs/plugins/plugin-script.asciidoc
@@ -53,7 +53,7 @@ sudo bin/elasticsearch-plugin install analysis-icu
This command will install the version of the plugin that matches your
Elasticsearch version and also show a progress bar while downloading.
-[float]
+[[plugin-management-custom-url]]
=== Custom URL or file system
A plugin can also be downloaded directly from a custom location by specifying the URL:
diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc
index 726f55cc88..c45979a85c 100644
--- a/docs/plugins/repository-azure.asciidoc
+++ b/docs/plugins/repository-azure.asciidoc
@@ -14,11 +14,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install repository-azure
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/repository-azure/{version}/repository-azure-{version}.zip.
+
[[repository-azure-remove]]
[float]
==== Removal
@@ -29,7 +31,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove repository-azure
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc
index a9658e1f21..d9ef6f5d2f 100644
--- a/docs/plugins/repository-gcs.asciidoc
+++ b/docs/plugins/repository-gcs.asciidoc
@@ -14,13 +14,15 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install repository-gcs
----------------------------------------------------------------
-// NOTCONSOLE
NOTE: The plugin requires new permission to be installed in order to work
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/repository-gcs/{version}/repository-gcs-{version}.zip.
+
[[repository-gcs-remove]]
[float]
==== Removal
@@ -31,7 +33,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove repository-gcs
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -133,6 +134,7 @@ A service account file looks like this:
"client_x509_cert_url": "..."
}
----
+// NOTCONSOLE
This file must be copied in the `config` directory of the elasticsearch installation and on
every node of the cluster.
diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc
index 02239a78b1..ab620b58cb 100644
--- a/docs/plugins/repository-hdfs.asciidoc
+++ b/docs/plugins/repository-hdfs.asciidoc
@@ -14,11 +14,13 @@ This plugin can be installed through the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install repository-hdfs
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on _every_ node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/repository-hdfs/{version}/repository-hdfs-{version}.zip.
+
[[repository-hdfs-remove]]
[float]
==== Removal
@@ -29,7 +31,6 @@ The plugin can be removed by specifying the _installed_ package:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove repository-hdfs
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc
index 3b35656b75..0671c84c2b 100644
--- a/docs/plugins/repository-s3.asciidoc
+++ b/docs/plugins/repository-s3.asciidoc
@@ -4,6 +4,8 @@
The S3 repository plugin adds support for using S3 as a repository for
{ref}/modules-snapshots.html[Snapshot/Restore].
+*If you are looking for a hosted solution of Elasticsearch on AWS, please visit http://www.elastic.co/cloud.*
+
[[repository-s3-install]]
[float]
==== Installation
@@ -14,11 +16,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install repository-s3
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/repository-s3/{version}/repository-s3-{version}.zip.
+
[[repository-s3-remove]]
[float]
==== Removal
@@ -29,7 +33,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove repository-s3
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
@@ -301,6 +304,7 @@ IAM in conjunction with pre-existing S3 buckets. Here is an example policy which
"Version": "2012-10-17"
}
----
+// NOTCONSOLE
You may further restrict the permissions by specifying a prefix within the bucket; in this example, the prefix is named "foo".
@@ -344,6 +348,7 @@ You may further restrict the permissions by specifying a prefix within the bucke
"Version": "2012-10-17"
}
----
+// NOTCONSOLE
The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository
registration will fail. If you want elasticsearch to create the bucket instead, you can add the permission to create a
@@ -361,6 +366,7 @@ specific bucket like this:
]
}
----
+// NOTCONSOLE
[[repository-s3-endpoint]]
===== Using other S3 endpoint
diff --git a/docs/plugins/store-smb.asciidoc b/docs/plugins/store-smb.asciidoc
index ac35342f2f..d2995189bd 100644
--- a/docs/plugins/store-smb.asciidoc
+++ b/docs/plugins/store-smb.asciidoc
@@ -13,11 +13,13 @@ This plugin can be installed using the plugin manager:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install store-smb
----------------------------------------------------------------
-// NOTCONSOLE
The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.
+This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
+{plugin_url}/store-smb/{version}/store-smb-{version}.zip.
+
[[store-smb-remove]]
[float]
==== Removal
@@ -28,7 +30,6 @@ The plugin can be removed with the following command:
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove store-smb
----------------------------------------------------------------
-// NOTCONSOLE
The node must be stopped before removing the plugin.
diff --git a/docs/python/index.asciidoc b/docs/python/index.asciidoc
index 2ec7579ef1..8440ed1cd0 100644
--- a/docs/python/index.asciidoc
+++ b/docs/python/index.asciidoc
@@ -48,6 +48,7 @@ later, 0.4 releases are meant to work with Elasticsearch 0.90.*.
The recommended way to set your requirements in your `setup.py` or
`requirements.txt` is:
+[source,txt]
------------------------------------
# Elasticsearch 2.x
elasticsearch>=2.0.0,<3.0.0
@@ -124,4 +125,3 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc
index ada134238b..2b6523725b 100644
--- a/docs/reference/aggregations.asciidoc
+++ b/docs/reference/aggregations.asciidoc
@@ -58,6 +58,7 @@ The following snippet captures the basic structure of aggregations:
[,"<aggregation_name_2>" : { ... } ]*
}
--------------------------------------------------
+// NOTCONSOLE
The `aggregations` object (the key `aggs` can also be used) in the JSON holds the aggregations to be computed. Each aggregation
is associated with a logical name that the user defines (e.g. if the aggregation computes the average price, then it would
diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc
index e69877d97f..915a3c6a38 100644
--- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc
@@ -10,14 +10,18 @@ This aggregation relies on the <<mapping-parent-field,_parent field>> in the map
For example, let's say we have an index of questions and answers. The answer type has the following `_parent` field in the mapping:
[source,js]
--------------------------------------------------
+PUT child_example
{
- "answer" : {
- "_parent" : {
- "type" : "question"
+ "mappings": {
+ "answer" : {
+ "_parent" : {
+ "type" : "question"
+ }
}
}
}
--------------------------------------------------
+// CONSOLE
The question typed documents contain a tag field and the answer typed documents contain an owner field. With the `children`
aggregation the tag buckets can be mapped to the owner buckets in a single request even though the two fields exist in
@@ -26,6 +30,7 @@ two different kinds of documents.
An example of a question typed document:
[source,js]
--------------------------------------------------
+PUT child_example/question/1
{
"body": "<p>I have Windows 2003 server and i bought a new Windows 2008 server...",
"title": "Whats the best way to file transfer my site from server to a newer one?",
@@ -33,13 +38,16 @@ An example of a question typed document:
"windows-server-2003",
"windows-server-2008",
"file-transfer"
- ],
+ ]
}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
-An example of an answer typed document:
+Examples of `answer` typed documents:
[source,js]
--------------------------------------------------
+PUT child_example/answer/1?parent=1&refresh
{
"owner": {
"location": "Norfolk, United Kingdom",
@@ -49,17 +57,30 @@ An example of an answer typed document:
"body": "<p>Unfortunately your pretty much limited to FTP...",
"creation_date": "2009-05-04T13:45:37.030"
}
+PUT child_example/answer/2?parent=1&refresh
+{
+ "owner": {
+ "location": "Norfolk, United Kingdom",
+ "display_name": "Troll",
+ "id": 49
+ },
+ "body": "<p>Use Linux...",
+ "creation_date": "2009-05-05T13:45:37.030"
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
The following request connects the two together:
[source,js]
--------------------------------------------------
+POST child_example/_search?size=0
{
"aggs": {
"top-tags": {
"terms": {
- "field": "tags",
+ "field": "tags.keyword",
"size": 10
},
"aggs": {
@@ -70,7 +91,7 @@ The following request can be built that connects the two together:
"aggs": {
"top-names": {
"terms": {
- "field": "owner.display_name",
+ "field": "owner.display_name.keyword",
"size": 10
}
}
@@ -81,6 +102,8 @@ The following request can be built that connects the two together:
}
}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
<1> The `type` points to the type/mapping with the name `answer`.
@@ -91,245 +114,74 @@ Possible response:
[source,js]
--------------------------------------------------
{
+ "timed_out": false,
+ "took": 25,
+ "_shards": { "total": 5, "successful": 5, "failed": 0 },
+ "hits": { "total": 3, "max_score": 0.0, hits: [] },
"aggregations": {
"top-tags": {
+ "doc_count_error_upper_bound": 0,
+ "sum_other_doc_count": 0,
"buckets": [
{
- "key": "windows-server-2003",
- "doc_count": 25365, <1>
+ "key": "file-transfer",
+ "doc_count": 1, <1>
"to-answers": {
- "doc_count": 36004, <2>
+ "doc_count": 2, <2>
"top-names": {
+ "doc_count_error_upper_bound": 0,
+ "sum_other_doc_count": 0,
"buckets": [
{
"key": "Sam",
- "doc_count": 274
- },
- {
- "key": "chris",
- "doc_count": 19
- },
- {
- "key": "david",
- "doc_count": 14
- },
- ...
- ]
- }
- }
- },
- {
- "key": "linux",
- "doc_count": 18342,
- "to-answers": {
- "doc_count": 6655,
- "top-names": {
- "buckets": [
- {
- "key": "abrams",
- "doc_count": 25
- },
- {
- "key": "ignacio",
- "doc_count": 25
+ "doc_count": 1
},
{
- "key": "vazquez",
- "doc_count": 25
- },
- ...
+ "key": "Troll",
+ "doc_count": 1
+ }
]
}
}
},
{
- "key": "windows",
- "doc_count": 18119,
- "to-answers": {
- "doc_count": 24051,
- "top-names": {
- "buckets": [
- {
- "key": "molly7244",
- "doc_count": 265
- },
- {
- "key": "david",
- "doc_count": 27
- },
- {
- "key": "chris",
- "doc_count": 26
- },
- ...
- ]
- }
- }
- },
- {
- "key": "osx",
- "doc_count": 10971,
- "to-answers": {
- "doc_count": 5902,
- "top-names": {
- "buckets": [
- {
- "key": "diago",
- "doc_count": 4
- },
- {
- "key": "albert",
- "doc_count": 3
- },
- {
- "key": "asmus",
- "doc_count": 3
- },
- ...
- ]
- }
- }
- },
- {
- "key": "ubuntu",
- "doc_count": 8743,
- "to-answers": {
- "doc_count": 8784,
- "top-names": {
- "buckets": [
- {
- "key": "ignacio",
- "doc_count": 9
- },
- {
- "key": "abrams",
- "doc_count": 8
- },
- {
- "key": "molly7244",
- "doc_count": 8
- },
- ...
- ]
- }
- }
- },
- {
- "key": "windows-xp",
- "doc_count": 7517,
- "to-answers": {
- "doc_count": 13610,
- "top-names": {
- "buckets": [
- {
- "key": "molly7244",
- "doc_count": 232
- },
- {
- "key": "chris",
- "doc_count": 9
- },
- {
- "key": "john",
- "doc_count": 9
- },
- ...
- ]
- }
- }
- },
- {
- "key": "networking",
- "doc_count": 6739,
- "to-answers": {
- "doc_count": 2076,
- "top-names": {
- "buckets": [
- {
- "key": "molly7244",
- "doc_count": 6
- },
- {
- "key": "alnitak",
- "doc_count": 5
- },
- {
- "key": "chris",
- "doc_count": 3
- },
- ...
- ]
- }
- }
- },
- {
- "key": "mac",
- "doc_count": 5590,
- "to-answers": {
- "doc_count": 999,
- "top-names": {
- "buckets": [
- {
- "key": "abrams",
- "doc_count": 2
- },
- {
- "key": "ignacio",
- "doc_count": 2
- },
- {
- "key": "vazquez",
- "doc_count": 2
- },
- ...
- ]
- }
- }
- },
- {
- "key": "wireless-networking",
- "doc_count": 4409,
+ "key": "windows-server-2003",
+ "doc_count": 1, <1>
"to-answers": {
- "doc_count": 6497,
+ "doc_count": 2, <2>
"top-names": {
+ "doc_count_error_upper_bound": 0,
+ "sum_other_doc_count": 0,
"buckets": [
{
- "key": "molly7244",
- "doc_count": 61
- },
- {
- "key": "chris",
- "doc_count": 5
+ "key": "Sam",
+ "doc_count": 1
},
{
- "key": "mike",
- "doc_count": 5
- },
- ...
+ "key": "Troll",
+ "doc_count": 1
+ }
]
}
}
},
{
- "key": "windows-8",
- "doc_count": 3601,
+ "key": "windows-server-2008",
+ "doc_count": 1, <1>
"to-answers": {
- "doc_count": 4263,
+ "doc_count": 2, <2>
"top-names": {
+ "doc_count_error_upper_bound": 0,
+ "sum_other_doc_count": 0,
"buckets": [
{
- "key": "molly7244",
- "doc_count": 3
- },
- {
- "key": "msft",
- "doc_count": 2
+ "key": "Sam",
+ "doc_count": 1
},
{
- "key": "user172132",
- "doc_count": 2
- },
- ...
+ "key": "Troll",
+ "doc_count": 1
+ }
]
}
}
@@ -339,6 +191,7 @@ Possible response:
}
}
--------------------------------------------------
+// TESTRESPONSE[s/"took": 25/"took": $body.took/]
-<1> The number of question documents with the tag `windows-server-2003`.
-<2> The number of answer documents that are related to question documents with the tag `windows-server-2003`.
+<1> The number of question documents with the tag `file-transfer`, `windows-server-2003`, etc.
+<2> The number of answer documents that are related to question documents with the tag `file-transfer`, `windows-server-2003`, etc.
diff --git a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc
index b54a288b58..0336e21c2e 100644
--- a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc
@@ -227,12 +227,14 @@ a multi-value metrics aggregation, and in case of a single-value metrics aggrega
The path must be defined in the following form:
+// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form
+[source,ebnf]
--------------------------------------------------
-AGG_SEPARATOR := '>'
-METRIC_SEPARATOR := '.'
-AGG_NAME := <the name of the aggregation>
-METRIC := <the name of the metric (in case of multi-value metrics aggregation)>
-PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>]
+AGG_SEPARATOR = '>' ;
+METRIC_SEPARATOR = '.' ;
+AGG_NAME = <the name of the aggregation> ;
+METRIC = <the name of the metric (in case of multi-value metrics aggregation)> ;
+PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ;
--------------------------------------------------
[source,js]
diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
index 877da28931..68b2e8511f 100644
--- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
@@ -244,9 +244,18 @@ determined and is given a value of -1 to indicate this.
==== Order
The order of the buckets can be customized by setting the `order` parameter. By default, the buckets are ordered by
-their `doc_count` descending. It is also possible to change this behaviour as follows:
+their `doc_count` descending. It is possible to change this behaviour as documented below:
-Ordering the buckets by their `doc_count` in an ascending manner:
+WARNING: Sorting by ascending `_count` or by sub aggregation is discouraged as it increases the
+<<search-aggregations-bucket-terms-aggregation-approximate-counts,error>> on document counts.
+It is fine when a single shard is queried, or when the field that is being aggregated was used
+as a routing key at index time: in these cases results will be accurate since shards have disjoint
+values. However otherwise, errors are unbounded. One particular case that could still be useful
+is sorting by <<search-aggregations-metrics-min-aggregation,`min`>> or
+<<search-aggregations-metrics-max-aggregation,`max`>> aggregation: counts will not be accurate
+but at least the top buckets will be correctly picked.
+
+Ordering the buckets by their doc `_count` in an ascending manner:
[source,js]
--------------------------------------------------
@@ -317,14 +326,15 @@ Ordering the buckets by multi value metrics sub-aggregation (identified by the a
}
--------------------------------------------------
-WARNING: Sorting by ascending `_count` or by sub aggregation is discouraged as it increases the
-<<search-aggregations-bucket-terms-aggregation-approximate-counts,error>> on document counts.
-It is fine when a single shard is queried, or when the field that is being aggregated was used
-as a routing key at index time: in these cases results will be accurate since shards have disjoint
-values. However otherwise, errors are unbounded. One particular case that could still be useful
-is sorting by <<search-aggregations-metrics-min-aggregation,`min`>> or
-<<search-aggregations-metrics-max-aggregation,`max`>> aggregation: counts will not be accurate
-but at least the top buckets will be correctly picked.
+[NOTE]
+.Pipeline aggs cannot be used for sorting
+=======================================
+
+<<search-aggregations-pipeline,Pipeline aggregations>> are run during the
+reduce phase after all other aggregations have already completed. For this
+reason, they cannot be used for ordering.
+
+=======================================
It is also possible to order the buckets based on a "deeper" aggregation in the hierarchy. This is supported as long
as the aggregations path are of a single-bucket type, where the last aggregation in the path may either be a single-bucket
@@ -334,12 +344,14 @@ a multi-value metrics aggregation, and in case of a single-value metrics aggrega
The path must be defined in the following form:
+// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form
+[source,ebnf]
--------------------------------------------------
-AGG_SEPARATOR := '>'
-METRIC_SEPARATOR := '.'
-AGG_NAME := <the name of the aggregation>
-METRIC := <the name of the metric (in case of multi-value metrics aggregation)>
-PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>]
+AGG_SEPARATOR = '>' ;
+METRIC_SEPARATOR = '.' ;
+AGG_NAME = <the name of the aggregation> ;
+METRIC = <the name of the metric (in case of multi-value metrics aggregation)> ;
+PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ;
--------------------------------------------------
[source,js]
diff --git a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc
index 73d7f3c26b..77fc7dfcd5 100644
--- a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc
+++ b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc
@@ -71,6 +71,7 @@ The following chart shows how the error varies before and after the threshold:
////
To generate this chart, use this gnuplot script:
+[source,gnuplot]
-------
#!/usr/bin/gnuplot
reset
@@ -95,6 +96,7 @@ plot "test.dat" using 1:2 title "threshold=100", \
and generate data in a 'test.dat' file using the Java code below:
+[source,java]
-------
private static double error(HyperLogLogPlusPlus h, long expected) {
double actual = h.cardinality(0);
@@ -140,7 +142,7 @@ counting millions of items.
On string fields that have a high cardinality, it might be faster to store the
hash of your field values in your index and then run the cardinality aggregation
on this field. This can either be done by providing hash values from client-side
-or by letting elasticsearch compute hash values for you by using the
+or by letting elasticsearch compute hash values for you by using the
{plugins}/mapper-murmur3.html[`mapper-murmur3`] plugin.
NOTE: Pre-computing hashes is usually only useful on very large and/or
diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
index 1b955d2a89..83607b18df 100644
--- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
+++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
@@ -21,6 +21,7 @@ The top_hits aggregation returns regular search hits, because of this many per h
* <<search-request-explain,Explain>>
* <<search-request-named-queries-and-filters,Named filters and queries>>
* <<search-request-source-filtering,Source filtering>>
+* <<search-request-stored-fields,Stored fields>>
* <<search-request-script-fields,Script fields>>
* <<search-request-docvalue-fields,Doc value fields>>
* <<search-request-version,Include versions>>
diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc
index da78e18855..c9cd14d620 100644
--- a/docs/reference/aggregations/pipeline.asciidoc
+++ b/docs/reference/aggregations/pipeline.asciidoc
@@ -34,12 +34,14 @@ will be included in the final output.
Most pipeline aggregations require another aggregation as their input. The input aggregation is defined via the `buckets_path`
parameter, which follows a specific format:
+// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form
+[source,ebnf]
--------------------------------------------------
-AGG_SEPARATOR := '>'
-METRIC_SEPARATOR := '.'
-AGG_NAME := <the name of the aggregation>
-METRIC := <the name of the metric (in case of multi-value metrics aggregation)>
-PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>]
+AGG_SEPARATOR = '>' ;
+METRIC_SEPARATOR = '.' ;
+AGG_NAME = <the name of the aggregation> ;
+METRIC = <the name of the metric (in case of multi-value metrics aggregation)> ;
+PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ;
--------------------------------------------------
For example, the path `"my_bucket>my_stats.avg"` will point to the `avg` value in the `"my_stats"` metric, which is
@@ -165,7 +167,7 @@ POST /sales/_search
"count": "categories._bucket_count" <1>
},
"script": {
- "inline": "count != 0"
+ "inline": "params.count != 0"
}
}
}
diff --git a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc
index 1bfd080090..6a010650e5 100644
--- a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc
+++ b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc
@@ -77,7 +77,7 @@ POST /sales/_search
"tShirtSales": "t-shirts>sales",
"totalSales": "total_sales"
},
- "script": "tShirtSales / totalSales * 100"
+ "script": "params.tShirtSales / params.totalSales * 100"
}
}
}
diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc
index 86a56a77aa..685c40f483 100644
--- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc
+++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc
@@ -23,7 +23,7 @@ A `bucket_selector` aggregation looks like this in isolation:
"my_var1": "the_sum", <1>
"my_var2": "the_value_count"
},
- "script": "my_var1 > my_var2"
+ "script": "params.my_var1 > params.my_var2"
}
}
--------------------------------------------------
@@ -66,7 +66,7 @@ POST /sales/_search
"buckets_path": {
"totalSales": "total_sales"
},
- "script": "totalSales > 200"
+ "script": "params.totalSales > 200"
}
}
}
diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc
index c917cc7aa0..53c7d913ad 100644
--- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc
@@ -82,7 +82,7 @@ The `fingerprint` analyzer accepts the following parameters:
`stopwords`::
A pre-defined stop words list like `_english_` or an array containing a
- list of stop words. Defaults to `_none_`.
+ list of stop words. Defaults to `\_none_`.
`stopwords_path`::
diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
index 448f5289d5..7504be927d 100644
--- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
@@ -5,6 +5,21 @@ The `pattern` analyzer uses a regular expression to split the text into terms.
The regular expression should match the *token separators*, not the tokens
themselves. The regular expression defaults to `\W+` (or all non-word characters).
+[WARNING]
+.Beware of Pathological Regular Expressions
+========================================
+
+The pattern analyzer uses
+http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[Java Regular Expressions].
+
+A badly written regular expression could run very slowly or even throw a
+StackOverflowError and cause the node it is running on to exit suddenly.
+
+Read more about http://www.regular-expressions.info/catastrophic.html[pathological regular expressions and how to avoid them].
+
+========================================
+
+
[float]
=== Definition
@@ -162,7 +177,7 @@ The `pattern` analyzer accepts the following parameters:
`stopwords`::
A pre-defined stop words list like `_english_` or an array containing a
- list of stop words. Defaults to `_none_`.
+ list of stop words. Defaults to `\_none_`.
`stopwords_path`::
diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
index bc49be5079..eacbb1c3ca 100644
--- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
@@ -145,7 +145,7 @@ The `standard` analyzer accepts the following parameters:
`stopwords`::
A pre-defined stop words list like `_english_` or an array containing a
- list of stop words. Defaults to `_none_`.
+ list of stop words. Defaults to `\_none_`.
`stopwords_path`::
diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc
index 3f4bf9aa05..32ee14d8f5 100644
--- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc
+++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc
@@ -5,6 +5,20 @@ The `pattern_replace` character filter uses a regular expression to match
characters which should be replaced with the specified replacement string.
The replacement string can refer to capture groups in the regular expression.
+[WARNING]
+.Beware of Pathological Regular Expressions
+========================================
+
+The pattern replace character filter uses
+http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[Java Regular Expressions].
+
+A badly written regular expression could run very slowly or even throw a
+StackOverflowError and cause the node it is running on to exit suddenly.
+
+Read more about http://www.regular-expressions.info/catastrophic.html[pathological regular expressions and how to avoid them].
+
+========================================
+
[float]
=== Configuration
diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc
index 15a911b8ed..89cce11a61 100644
--- a/docs/reference/analysis/tokenfilters.asciidoc
+++ b/docs/reference/analysis/tokenfilters.asciidoc
@@ -87,4 +87,6 @@ include::tokenfilters/apostrophe-tokenfilter.asciidoc[]
include::tokenfilters/decimal-digit-tokenfilter.asciidoc[]
-include::tokenfilters/fingerprint-tokenfilter.asciidoc[] \ No newline at end of file
+include::tokenfilters/fingerprint-tokenfilter.asciidoc[]
+
+include::tokenfilters/minhash-tokenfilter.asciidoc[] \ No newline at end of file
diff --git a/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
new file mode 100644
index 0000000000..eb6a4d820e
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
@@ -0,0 +1,22 @@
+[[analysis-minhash-tokenfilter]]
+=== Minhash Token Filter
+
+A token filter of type `min_hash` hashes each token of the token stream and divides
+the resulting hashes into buckets, keeping the lowest-valued hashes per
+bucket. It then returns these hashes as tokens.
+
+The following settings can be set for a `min_hash` token filter:
+
+[cols="<,<", options="header",]
+|=======================================================================
+|Setting |Description
+|`hash_count` |The number of hashes to hash the token stream with. Defaults to `1`.
+
+|`bucket_count` |The number of buckets to divide the minhashes into. Defaults to `512`.
+
+|`hash_set_size` |The number of minhashes to keep per bucket. Defaults to `1`.
+
+|`with_rotation` |Whether or not to fill empty buckets with the value of the first non-empty
+bucket to its circular right. Only takes effect if `hash_set_size` is equal to one.
+Defaults to `true` if `bucket_count` is greater than one, else `false`.
+|=======================================================================
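+
+As a quick sketch of how these settings fit together, the filter can be wired
+into a custom analyzer via the index settings. The index, filter, and analyzer
+names below are illustrative; only the `min_hash` type and its settings come
+from the table above:
+
+[source,js]
+--------------------------------------------------
+PUT /minhash_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "my_minhash": {
+          "type": "min_hash",
+          "hash_count": 1,
+          "bucket_count": 512,
+          "hash_set_size": 1,
+          "with_rotation": true
+        }
+      },
+      "analyzer": {
+        "my_minhash_analyzer": {
+          "tokenizer": "standard",
+          "filter": ["my_minhash"]
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE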
diff --git a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
index 7c919b56b9..ccde46a3fd 100644
--- a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
@@ -7,6 +7,20 @@ Patterns are not anchored to the beginning and end of the string, so
each pattern can match multiple times, and matches are allowed to
overlap.
+[WARNING]
+.Beware of Pathological Regular Expressions
+========================================
+
+The pattern capture token filter uses
+http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[Java Regular Expressions].
+
+A badly written regular expression could run very slowly or even throw a
+StackOverflowError and cause the node it is running on to exit suddenly.
+
+Read more about http://www.regular-expressions.info/catastrophic.html[pathological regular expressions and how to avoid them].
+
+========================================
+
For instance, a pattern like:
[source,js]
diff --git a/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
index 54e08426e8..bc8cdc385b 100644
--- a/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
@@ -7,3 +7,17 @@ defined using the `pattern` parameter, and the replacement string can be
provided using the `replacement` parameter (supporting referencing the
original text, as explained
http://docs.oracle.com/javase/6/docs/api/java/util/regex/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here]).
+
+[WARNING]
+.Beware of Pathological Regular Expressions
+========================================
+
+The pattern replace token filter uses
+http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[Java Regular Expressions].
+
+A badly written regular expression could run very slowly or even throw a
+StackOverflowError and cause the node it is running on to exit suddenly.
+
+Read more about http://www.regular-expressions.info/catastrophic.html[pathological regular expressions and how to avoid them].
+
+========================================
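+
+As a minimal sketch of configuring this filter (the index, filter, and
+analyzer names are illustrative), the `pattern` and `replacement` parameters
+might be set like this:
+
+[source,js]
+--------------------------------------------------
+PUT /pattern_replace_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "my_pattern_replace": {
+          "type": "pattern_replace",
+          "pattern": "(dog)",
+          "replacement": "watch$1"
+        }
+      },
+      "analyzer": {
+        "my_analyzer": {
+          "tokenizer": "standard",
+          "filter": ["my_pattern_replace"]
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE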
diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
index ead0e118af..f3b5a19566 100644
--- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
@@ -76,4 +76,4 @@ Elasticsearch provides the following predefined list of languages:
`_portuguese_`, `_romanian_`, `_russian_`, `_sorani_`, `_spanish_`,
`_swedish_`, `_thai_`, `_turkish_`.
-For the empty stopwords list (to disable stopwords) use: `_none_`.
+For the empty stopwords list (to disable stopwords) use: `\_none_`.
diff --git a/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
index c96fd08c95..5e1b33512d 100644
--- a/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
@@ -8,6 +8,20 @@ terms.
The default pattern is `\W+`, which splits text whenever it encounters
non-word characters.
+[WARNING]
+.Beware of Pathological Regular Expressions
+========================================
+
+The pattern tokenizer uses
+http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[Java Regular Expressions].
+
+A badly written regular expression could run very slowly or even throw a
+StackOverflowError and cause the node it is running on to exit suddenly.
+
+Read more about http://www.regular-expressions.info/catastrophic.html[pathological regular expressions and how to avoid them].
+
+========================================
+
[float]
=== Example output
diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc
index 26c1a7476e..e2d10ca62b 100644
--- a/docs/reference/api-conventions.asciidoc
+++ b/docs/reference/api-conventions.asciidoc
@@ -19,7 +19,7 @@ API, unless otherwise specified.
Most APIs that refer to an `index` parameter support execution across multiple indices,
using simple `test1,test2,test3` notation (or `_all` for all indices). They also
-support wildcards, for example: `test*`, and the ability to "add" (`+`)
+support wildcards, for example: `test*` or `*test` or `te*t` or `*test*`, and the ability to "add" (`+`)
and "remove" (`-`), for example: `+test*,-test3`.
All multi indices APIs support the following URL query string parameters:
@@ -88,12 +88,18 @@ You must enclose date math index name expressions within angle brackets. For exa
[source,js]
----------------------------------------------------------------------
-curl -XGET 'localhost:9200/<logstash-{now%2Fd-2d}>/_search' {
+GET /<logstash-{now%2Fd}>/_search
+{
"query" : {
- ...
+ "match": {
+ "test": "data"
+ }
}
}
----------------------------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT logstash-2016.09.20\n/]
+// TEST[s/\{now/{2016.09.20||/]
NOTE: The `/` used for date rounding must be url encoded as `%2F` in any url.
@@ -102,11 +108,11 @@ they resolve to given the current time is 22nd March 2024 noon UTC.
[options="header"]
|======
-| Expression |Resolves to
-| `<logstash-{now/d}>` | `logstash-2024.03.22`
-| `<logstash-{now/M}>` | `logstash-2024.03.01`
-| `<logstash-{now/M{YYYY.MM}}>` | `logstash-2024.03`
-| `<logstash-{now/M-1M{YYYY.MM}}>` | `logstash-2024.02`
+| Expression |Resolves to
+| `<logstash-{now/d}>` | `logstash-2024.03.22`
+| `<logstash-{now/M}>` | `logstash-2024.03.01`
+| `<logstash-{now/M{YYYY.MM}}>` | `logstash-2024.03`
+| `<logstash-{now/M-1M{YYYY.MM}}>` | `logstash-2024.02`
| `<logstash-{now/d{YYYY.MM.dd\|+12:00}}>` | `logstash-2024.03.23`
|======
@@ -121,12 +127,18 @@ three days, assuming the indices use the default Logstash index name format,
[source,js]
----------------------------------------------------------------------
-curl -XGET 'localhost:9200/<logstash-{now%2Fd-2d}>,<logstash-{now%2Fd-1d}>,<logstash-{now%2Fd}>/_search' {
+GET /<logstash-{now%2Fd-2d}>,<logstash-{now%2Fd-1d}>,<logstash-{now%2Fd}>/_search
+{
"query" : {
- ...
+ "match": {
+ "test": "data"
+ }
}
}
----------------------------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT logstash-2016.09.20\nPUT logstash-2016.09.19\nPUT logstash-2016.09.18\n/]
+// TEST[s/\{now/{2016.09.20||/]
[[common-options]]
== Common options
@@ -199,82 +211,134 @@ All REST APIs accept a `filter_path` parameter that can be used to reduce
the response returned by elasticsearch. This parameter takes a comma
separated list of filters expressed with the dot notation:
-[source,sh]
+[source,js]
+--------------------------------------------------
+GET /_search?q=elasticsearch&filter_path=took,hits.hits._id,hits.hits._score
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+Responds:
+
+[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/_search?pretty&filter_path=took,hits.hits._id,hits.hits._score'
{
"took" : 3,
"hits" : {
"hits" : [
{
- "_id" : "3640",
- "_score" : 1.0
- },
- {
- "_id" : "3642",
- "_score" : 1.0
+ "_id" : "0",
+ "_score" : 1.6375021
}
]
}
}
--------------------------------------------------
+// TESTRESPONSE[s/"took" : 3/"took" : $body.took/]
+// TESTRESPONSE[s/1.6375021/$body.hits.hits.0._score/]
It also supports the `*` wildcard character to match any field or part
of a field's name:
[source,sh]
--------------------------------------------------
-curl -XGET 'localhost:9200/_nodes/stats?filter_path=nodes.*.ho*'
+GET /_cluster/state?filter_path=metadata.indices.*.stat*
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT twitter\n/]
+
+Responds:
+
+[source,sh]
+--------------------------------------------------
{
- "nodes" : {
- "lvJHed8uQQu4brS-SXKsNA" : {
- "host" : "portable"
+ "metadata" : {
+ "indices" : {
+ "twitter": {"state": "open"}
}
}
}
--------------------------------------------------
+// TESTRESPONSE
And the `**` wildcard can be used to include fields without knowing the
exact path of the field. For example, we can return the state of every
shard with this request:
-[source,sh]
+[source,js]
+--------------------------------------------------
+GET /_cluster/state?filter_path=routing_table.indices.**.state
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT twitter\n/]
+
+Responds:
+
+[source,js]
--------------------------------------------------
-curl 'localhost:9200/_segments?pretty&filter_path=indices.**.version'
{
- "indices" : {
- "movies" : {
- "shards" : {
- "0" : [ {
- "segments" : {
- "_0" : {
- "version" : "5.2.0"
- }
- }
- } ],
- "2" : [ {
- "segments" : {
- "_0" : {
- "version" : "5.2.0"
- }
- }
- } ]
- }
- },
- "books" : {
- "shards" : {
- "0" : [ {
- "segments" : {
- "_0" : {
- "version" : "5.2.0"
- }
- }
- } ]
+ "routing_table": {
+ "indices": {
+ "twitter": {
+ "shards": {
+ "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}],
+ "1": [{"state": "STARTED"}, {"state": "UNASSIGNED"}],
+ "2": [{"state": "STARTED"}, {"state": "UNASSIGNED"}],
+ "3": [{"state": "STARTED"}, {"state": "UNASSIGNED"}],
+ "4": [{"state": "STARTED"}, {"state": "UNASSIGNED"}]
+ }
}
}
}
}
--------------------------------------------------
+// TESTRESPONSE
+
+It is also possible to exclude one or more fields by prefixing the filter with the character `-`:
+
+[source,js]
+--------------------------------------------------
+GET /_count?filter_path=-_shards
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+Responds:
+
+[source,js]
+--------------------------------------------------
+{
+ "count" : 5
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+And for more control, both inclusive and exclusive filters can be combined in the same expression. In
+this case, the exclusive filters will be applied first and the result will be filtered again using the
+inclusive filters:
+
+[source,js]
+--------------------------------------------------
+GET /_cluster/state?filter_path=metadata.indices.*.state,-metadata.indices.logstash-*
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT index-1\nPUT index-2\nPUT index-3\nPUT logstash-2016.01\n/]
+
+Responds:
+
+[source,js]
+--------------------------------------------------
+{
+ "metadata" : {
+ "indices" : {
+ "index-1" : {"state" : "open"},
+ "index-2" : {"state" : "open"},
+ "index-3" : {"state" : "open"}
+ }
+ }
+}
+--------------------------------------------------
+// TESTRESPONSE
Note that elasticsearch sometimes directly returns the raw value of a field,
like the `_source` field. If you want to filter `_source` fields, you should
@@ -282,21 +346,33 @@ consider combining the already existing `_source` parameter (see
<<get-source-filtering,Get API>> for more details) with the `filter_path`
parameter like this:
-[source,sh]
+[source,js]
+--------------------------------------------------
+POST /library/book?refresh
+{"title": "Book #1", "rating": 200.1}
+POST /library/book?refresh
+{"title": "Book #2", "rating": 1.7}
+POST /library/book?refresh
+{"title": "Book #3", "rating": 0.1}
+GET /_search?filter_path=hits.hits._source&_source=title&sort=rating:desc
+--------------------------------------------------
+// CONSOLE
+
+[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/_search?pretty&filter_path=hits.hits._source&_source=title'
{
"hits" : {
"hits" : [ {
- "_source":{"title":"Book #2"}
- }, {
"_source":{"title":"Book #1"}
}, {
+ "_source":{"title":"Book #2"}
+ }, {
"_source":{"title":"Book #3"}
} ]
}
}
--------------------------------------------------
+// TESTRESPONSE
[float]
@@ -307,30 +383,66 @@ The `flat_settings` flag affects rendering of the lists of settings. When
[source,js]
--------------------------------------------------
+GET twitter/_settings?flat_settings=true
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+Returns:
+
+[source,js]
+--------------------------------------------------
{
- "persistent" : { },
- "transient" : {
- "discovery.zen.minimum_master_nodes" : "1"
+ "twitter" : {
+ "settings": {
+ "index.number_of_replicas": "1",
+ "index.number_of_shards": "1",
+ "index.creation_date": "1474389951325",
+ "index.uuid": "n6gzFZTgS664GUfx0Xrpjw",
+ "index.version.created": ...,
+ "index.provided_name" : "twitter"
+ }
}
}
--------------------------------------------------
+// TESTRESPONSE[s/1474389951325/$body.twitter.settings.index\\\\.creation_date/]
+// TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.twitter.settings.index\\\\.uuid/]
+// TESTRESPONSE[s/"index.version.created": \.\.\./"index.version.created": $body.twitter.settings.index\\\\.version\\\\.created/]
When the `flat_settings` flag is `false` settings are returned in a more
human readable structured format:
[source,js]
--------------------------------------------------
+GET twitter/_settings?flat_settings=false
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+Returns:
+
+[source,js]
+--------------------------------------------------
{
- "persistent" : { },
- "transient" : {
- "discovery" : {
- "zen" : {
- "minimum_master_nodes" : "1"
+ "twitter" : {
+ "settings" : {
+ "index" : {
+ "number_of_replicas": "1",
+ "number_of_shards": "1",
+ "creation_date": "1474389951325",
+ "uuid": "n6gzFZTgS664GUfx0Xrpjw",
+ "version": {
+ "created": ...
+ },
+ "provided_name" : "twitter"
}
}
}
}
--------------------------------------------------
+// TESTRESPONSE[s/1474389951325/$body.twitter.settings.index.creation_date/]
+// TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.twitter.settings.index.uuid/]
+// TESTRESPONSE[s/"created": \.\.\./"created": $body.twitter.settings.index.version.created/]
By default the `flat_settings` is set to `false`.
@@ -425,10 +537,6 @@ Centimeter:: `cm` or `centimeters`
Millimeter:: `mm` or `millimeters`
Nautical mile:: `NM`, `nmi` or `nauticalmiles`
-The `precision` parameter in the <<query-dsl-geohash-cell-query>> accepts
-distances with the above units, but if no unit is specified, then the
-precision is interpreted as the length of the geohash.
-
[[fuzziness]]
[float]
=== Fuzziness
diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc
index 9a0bc0f5cc..f29bc9badd 100644
--- a/docs/reference/cat.asciidoc
+++ b/docs/reference/cat.asciidoc
@@ -59,11 +59,11 @@ only those columns to appear.
[source,sh]
--------------------------------------------------
% curl 'n1:9200/_cat/nodes?h=ip,port,heapPercent,name'
-192.168.56.40 9300 40.3 Captain Universe
-192.168.56.20 9300 15.3 Kaluu
-192.168.56.50 9300 17.0 Yellowjacket
-192.168.56.10 9300 12.3 Remy LeBeau
-192.168.56.30 9300 43.9 Ramsey, Doug
+192.168.56.40 9300 40.3 bGG90GE
+192.168.56.20 9300 15.3 H5dfFeA
+192.168.56.50 9300 17.0 I8hydUG
+192.168.56.10 9300 12.3 DKDM97B
+192.168.56.30 9300 43.9 6-bjhwl
--------------------------------------------------
You can also request multiple columns using simple wildcards like
@@ -186,3 +186,5 @@ include::cat/shards.asciidoc[]
include::cat/segments.asciidoc[]
include::cat/snapshots.asciidoc[]
+
+include::cat/templates.asciidoc[]
diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc
index 4c354a814d..7a0236b849 100644
--- a/docs/reference/cat/allocation.asciidoc
+++ b/docs/reference/cat/allocation.asciidoc
@@ -8,9 +8,9 @@ and how much disk space they are using.
--------------------------------------------------
% curl '192.168.56.10:9200/_cat/allocation?v'
shards disk.indices disk.used disk.avail disk.total disk.percent host ip node
- 1 3.1gb 5.6gb 72.2gb 77.8gb 7.8 192.168.56.10 192.168.56.10 Jarella
- 1 3.1gb 5.6gb 72.2gb 77.8gb 7.8 192.168.56.30 192.168.56.30 Solarr
- 1 3.0gb 5.5gb 72.3gb 77.8gb 7.6 192.168.56.20 192.168.56.20 Adam II
+ 1 3.1gb 5.6gb 72.2gb 77.8gb 7.8 192.168.56.10 192.168.56.10 bGG90GE
+ 1 3.1gb 5.6gb 72.2gb 77.8gb 7.8 192.168.56.30 192.168.56.30 I8hydUG
+ 1 3.0gb 5.5gb 72.3gb 77.8gb 7.6 192.168.56.20 192.168.56.20 H5dfFeA
--------------------------------------------------
Here we can see that each node has been allocated a single shard and
diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc
index 854853e4d3..86b14c65f8 100644
--- a/docs/reference/cat/fielddata.asciidoc
+++ b/docs/reference/cat/fielddata.asciidoc
@@ -7,13 +7,13 @@ on every data node in the cluster.
[source,sh]
--------------------------------------------------
% curl '192.168.56.10:9200/_cat/fielddata?v'
-id host ip node field size
-c223lARiSGeezlbrcugAYQ myhost1 10.20.100.200 Jessica Jones body 159.8kb
-c223lARiSGeezlbrcugAYQ myhost1 10.20.100.200 Jessica Jones text 225.7kb
-waPCbitNQaCL6xC8VxjAwg myhost2 10.20.100.201 Adversary body 159.8kb
-waPCbitNQaCL6xC8VxjAwg myhost2 10.20.100.201 Adversary text 275.3kb
-yaDkp-G3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 Microchip body 109.2kb
-yaDkp-G3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 Microchip text 175.3kb
+id host ip node field size
+bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE body 159.8kb
+bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE text 225.7kb
+H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA body 159.8kb
+H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA text 275.3kb
+I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG body 109.2kb
+I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG text 175.3kb
--------------------------------------------------
Fields can be specified either as a query parameter, or in the URL path:
@@ -21,19 +21,19 @@ Fields can be specified either as a query parameter, or in the URL path:
[source,sh]
--------------------------------------------------
% curl '192.168.56.10:9200/_cat/fielddata?v&fields=body'
-id host ip node field size
-c223lARiSGeezlbrcugAYQ myhost1 10.20.100.200 Jessica Jones body 159.8kb
-waPCbitNQaCL6xC8VxjAwg myhost2 10.20.100.201 Adversary body 159.8kb
-yaDkp-G3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 Microchip body 109.2kb
+id host ip node field size
+bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE body 159.8kb
+H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA body 159.8kb
+I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG body 109.2kb
% curl '192.168.56.10:9200/_cat/fielddata/body,text?v'
-id host ip node field size
-c223lARiSGeezlbrcugAYQ myhost1 10.20.100.200 Jessica Jones body 159.8kb
-c223lARiSGeezlbrcugAYQ myhost1 10.20.100.200 Jessica Jones text 225.7kb
-waPCbitNQaCL6xC8VxjAwg myhost2 10.20.100.201 Adversary body 159.8kb
-waPCbitNQaCL6xC8VxjAwg myhost2 10.20.100.201 Adversary text 275.3kb
-yaDkp-G3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 Microchip body 109.2kb
-yaDkp-G3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 Microchip text 175.3kb
+id host ip node field size
+bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE body 159.8kb
+bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE text 225.7kb
+H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA body 159.8kb
+H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA text 275.3kb
+I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG body 109.2kb
+I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG text 175.3kb
--------------------------------------------------
The output shows the individual fielddata for the `body` and `text` fields, one row per field per node.
diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc
index 3e8938faab..22be4067f8 100644
--- a/docs/reference/cat/indices.asciidoc
+++ b/docs/reference/cat/indices.asciidoc
@@ -33,7 +33,7 @@ Which indices are yellow?
[source,sh]
--------------------------------------------------
-% curl localhost:9200/_cat/indices | grep ^yell
+% curl localhost:9200/_cat/indices?health=yellow
yellow open wiki 2 1 6401 1115 151.4mb 151.4mb
yellow open twitter 5 1 11434 0 32mb 32mb
--------------------------------------------------
@@ -52,7 +52,7 @@ How many merge operations have the shards for the `wiki` completed?
[source,sh]
--------------------------------------------------
-% curl 'localhost:9200/_cat/indices/wiki?pri&v&h=health,index,prirep,docs.count,mt'
+% curl 'localhost:9200/_cat/indices/wiki?pri&v&h=health,index,pri,rep,docs.count,mt'
health index docs.count mt pri.mt
green wiki 9646 16 16
--------------------------------------------------
diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc
index 6189948d91..caed564d7b 100644
--- a/docs/reference/cat/master.asciidoc
+++ b/docs/reference/cat/master.asciidoc
@@ -8,7 +8,7 @@ master's node ID, bound IP address, and node name.
--------------------------------------------------
% curl 'localhost:9200/_cat/master?v'
id ip node
-Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA
--------------------------------------------------
This information is also available via the `nodes` command, but this
@@ -19,9 +19,9 @@ all nodes agree on the master:
--------------------------------------------------
% pssh -i -h list.of.cluster.hosts curl -s localhost:9200/_cat/master
[1] 19:16:37 [SUCCESS] es3.vm
-Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA
[2] 19:16:37 [SUCCESS] es2.vm
-Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA
[3] 19:16:37 [SUCCESS] es1.vm
-Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA
--------------------------------------------------
diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc
index 2c73d715e3..1677583a70 100644
--- a/docs/reference/cat/nodeattrs.asciidoc
+++ b/docs/reference/cat/nodeattrs.asciidoc
@@ -6,9 +6,9 @@ The `nodeattrs` command shows custom node attributes.
["source","sh",subs="attributes,callouts"]
--------------------------------------------------
% curl 192.168.56.10:9200/_cat/nodeattrs
-node host ip attr value
-Black Bolt epsilon 192.168.1.8 rack rack314
-Black Bolt epsilon 192.168.1.8 azone us-east-1
+node host ip attr value
+DKDM97B epsilon 192.168.1.8 rack rack314
+DKDM97B epsilon 192.168.1.8 azone us-east-1
--------------------------------------------------
The first few columns give you basic info per node.
@@ -16,9 +16,9 @@ The first few columns give you basic info per node.
["source","sh",subs="attributes,callouts"]
--------------------------------------------------
-node host ip
-Black Bolt epsilon 192.168.1.8
-Black Bolt epsilon 192.168.1.8
+node host ip
+DKDM97B epsilon 192.168.1.8
+DKDM97B epsilon 192.168.1.8
--------------------------------------------------
@@ -52,15 +52,15 @@ mode (`v`). The header name will match the supplied value (e.g.,
["source","sh",subs="attributes,callouts"]
--------------------------------------------------
% curl 192.168.56.10:9200/_cat/nodeattrs?v&h=name,pid,attr,value
-name pid attr value
-Black Bolt 28000 rack rack314
-Black Bolt 28000 azone us-east-1
+name pid attr value
+DKDM97B 28000 rack rack314
+DKDM97B 28000 azone us-east-1
--------------------------------------------------
[cols="<,<,<,<,<",options="header",subs="normal"]
|=======================================================================
|Header |Alias |Appear by Default |Description |Example
-|`node`|`name`|Yes|Name of the node|Black Bolt
+|`node`|`name`|Yes|Name of the node|DKDM97B
|`id` |`nodeId` |No |Unique node ID |k0zy
|`pid` |`p` |No |Process ID |13061
|`host` |`h` |Yes |Host name |n1
diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc
index 288a28f364..b0b152d4c5 100644
--- a/docs/reference/cat/nodes.asciidoc
+++ b/docs/reference/cat/nodes.asciidoc
@@ -3,48 +3,35 @@
The `nodes` command shows the cluster topology.
-["source","sh",subs="attributes,callouts"]
---------------------------------------------------
-% curl 192.168.56.10:9200/_cat/nodes
-SP4H 4727 192.168.56.30 9300 {version} {jdk} 72.1gb 35.4 93.9mb 79 239.1mb 0.45 3.4h mdi - Boneyard
-_uhJ 5134 192.168.56.10 9300 {version} {jdk} 72.1gb 33.3 93.9mb 85 239.1mb 0.06 3.4h mdi * Athena
-HfDp 4562 192.168.56.20 9300 {version} {jdk} 72.2gb 74.5 93.9mb 83 239.1mb 0.12 3.4h mdi - Zarek
---------------------------------------------------
-
-The first few columns tell you where your nodes live. For sanity it
-also tells you what version of ES and the JVM each one runs.
-
-["source","sh",subs="attributes,callouts"]
+[source,sh]
--------------------------------------------------
-nodeId pid ip port version jdk
-u2PZ 4234 192.168.56.30 9300 {version} {jdk}
-URzf 5443 192.168.56.10 9300 {version} {jdk}
-ActN 3806 192.168.56.20 9300 {version} {jdk}
+% GET /_cat/nodes
+192.168.56.30 9 78 22 1.80 2.05 2.51 mdi * bGG90GE
+192.168.56.10 6 75 14 1.24 2.45 1.37 md - I8hydUG
+192.168.56.20 5 71 12 1.07 1.05 1.11 di - H5dfFeA
--------------------------------------------------
-
-The next few give a picture of your heap, memory, and load.
+The first few columns tell you where your nodes live and give
+a picture of your heap, memory, CPU and load.
[source,sh]
--------------------------------------------------
-diskAvail heapPercent heapMax ramPercent ramMax load
- 72.1gb 31.3 93.9mb 81 239.1mb 0.24
- 72.1gb 19.6 93.9mb 82 239.1mb 0.05
- 72.2gb 64.9 93.9mb 84 239.1mb 0.12
+ip heap.percent ram.percent cpu load_1m load_5m load_15m
+192.168.56.30 9 78 22 1.80 2.05 2.51
+192.168.56.10 6 75 14 1.24 2.45 1.37
+192.168.56.20 5 71 12 1.07 1.05 1.11
--------------------------------------------------
The last columns provide ancillary information that can often be
useful when looking at the cluster as a whole, particularly large
-ones. How many master-eligible nodes do I have? How many client
-nodes? It looks like someone restarted a node recently; which one was
-it?
+ones. How many master-eligible nodes do I have?
[source,sh]
--------------------------------------------------
-uptime node.role master name
- 3.5h di - Boneyard
- 3.5h md * Athena
- 3.5h i - Zarek
+node.role master name
+mdi * bGG90GE
+md - I8hydUG
+di - H5dfFeA
--------------------------------------------------
[float]
@@ -65,7 +52,7 @@ by default. To have the headers appear in the output, use verbose
mode (`v`). The header name will match the supplied value (e.g.,
`pid` versus `p`). For example:
-["source","sh",subs="attributes,callouts"]
+[source,sh]
--------------------------------------------------
% curl 192.168.56.10:9200/_cat/nodes?v&h=id,ip,port,v,m
id ip port v m
@@ -102,13 +89,15 @@ descriptors |123
descriptors percentage |1
|`file_desc.max` |`fdm`, `fileDescriptorMax` |No |Maximum number of file
descriptors |1024
-|`load` |`l` |No |Most recent load average |0.22
|`cpu` | |No |Recent system CPU usage as percent |12
+|`load_1m` |`l` |No |Most recent load average |0.22
+|`load_5m` |`l` |No |Load average for the last five minutes |0.78
+|`load_15m` |`l` |No |Load average for the last fifteen minutes |1.24
|`uptime` |`u` |No |Node uptime |17.3m
|`node.role` |`r`, `role`, `nodeRole` |Yes |Master eligible node (m);
Data node (d); Ingest node (i); Coordinating node only (-) |mdi
|`master` |`m` |Yes |Elected master (*); Not elected master (-) |*
-|`name` |`n` |Yes |Node name |Venom
+|`name` |`n` |Yes |Node name |I8hydUG
|`completion.size` |`cs`, `completionSize` |No |Size of completion |0b
|`fielddata.memory_size` |`fm`, `fielddataMemory` |No |Used fielddata
cache memory |0b
@@ -152,6 +141,8 @@ of current indexing operations |0
indexing |134ms
|`indexing.index_total` |`iito`, `indexingIndexTotal` |No |Number of
indexing operations |1
+|`indexing.index_failed` |`iif`, `indexingIndexFailed` |No |Number of
+failed indexing operations |0
|`merges.current` |`mc`, `mergesCurrent` |No |Number of current
merge operations |0
|`merges.current_docs` |`mcd`, `mergesCurrentDocs` |No |Number of
@@ -166,15 +157,6 @@ documents |0
merges |0b
|`merges.total_time` |`mtt`, `mergesTotalTime` |No |Time spent merging
documents |0s
-|`percolate.current` |`pc`, `percolateCurrent` |No |Number of current
-percolations |0
-|`percolate.memory_size` |`pm`, `percolateMemory` |No |Memory used by
-current percolations |0b
-|`percolate.queries` |`pq`, `percolateQueries` |No |Number of
-registered percolation queries |0
-|`percolate.time` |`pti`, `percolateTime` |No |Time spent
-percolating |0s
-|`percolate.total` |`pto`, `percolateTotal` |No |Total percolations |0
|`refresh.total` |`rto`, `refreshTotal` |No |Number of refreshes |16
|`refresh.time` |`rti`, `refreshTime` |No |Time spent in refreshes |91ms
|`script.compilations` |`scrcc`, `scriptCompilations` |No |Total script compilations |17
@@ -203,4 +185,9 @@ segments |1.4kb
|Memory used by index writer |18mb
|`segments.version_map_memory` |`svmm`, `segmentsVersionMapMemory` |No
|Memory used by version map |1.0kb
+|`segments.fixed_bitset_memory` |`sfbm`, `fixedBitsetMemory` |No
+|Memory used by fixed bit sets for nested object field types and type filters for types referred to in `_parent` fields |1.0kb
+|`suggest.current` |`suc`, `suggestCurrent` |No |Number of current suggest operations |0
+|`suggest.time` |`suti`, `suggestTime` |No |Time spent in suggest |0
+|`suggest.total` |`suto`, `suggestTotal` |No |Number of suggest operations |0
|=======================================================================
diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc
index 0af1faa5c9..a1d0dc8870 100644
--- a/docs/reference/cat/plugins.asciidoc
+++ b/docs/reference/cat/plugins.asciidoc
@@ -7,8 +7,8 @@ The `plugins` command provides a view per node of running plugins. This informat
------------------------------------------------------------------------------
% curl 'localhost:9200/_cat/plugins?v'
name component version description
-Abraxas discovery-gce 5.0.0 The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.
-Abraxas lang-javascript 5.0.0 The JavaScript language plugin allows to have javascript as the language of scripts to execute.
+I8hydUG discovery-gce 5.0.0 The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.
+I8hydUG lang-javascript 5.0.0 The JavaScript language plugin allows to have javascript as the language of scripts to execute.
-------------------------------------------------------------------------------
We can quickly tell how many plugins each node has, and which versions.
diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc
index 6fe748096d..7cdeee684d 100644
--- a/docs/reference/cat/recovery.asciidoc
+++ b/docs/reference/cat/recovery.asciidoc
@@ -15,12 +15,13 @@ are no shards in transit from one node to another:
[source,sh]
----------------------------------------------------------------------------
> curl -XGET 'localhost:9200/_cat/recovery?v'
-index shard time type stage source_host source_node target_host target_node repository snapshot files files_percent bytes bytes_percent total_files total_bytes translog translog_percent total_translog
-index 0 87ms store done 127.0.0.1 Athena 127.0.0.1 Athena n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
-index 1 97ms store done 127.0.0.1 Athena 127.0.0.1 Athena n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
-index 2 93ms store done 127.0.0.1 Athena 127.0.0.1 Athena n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
-index 3 90ms store done 127.0.0.1 Athena 127.0.0.1 Athena n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
-index 4 9ms store done 127.0.0.1 Athena 127.0.0.1 Athena n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
+index shard time type stage source_host source_node target_host target_node repository snapshot files files_percent bytes bytes_percent
+ total_files total_bytes translog translog_percent total_translog
+index 0 87ms store done 127.0.0.1 I8hydUG 127.0.0.1 I8hydUG n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
+index 1 97ms store done 127.0.0.1 I8hydUG 127.0.0.1 I8hydUG n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
+index 2 93ms store done 127.0.0.1 I8hydUG 127.0.0.1 I8hydUG n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
+index 3 90ms store done 127.0.0.1 I8hydUG 127.0.0.1 I8hydUG n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
+index 4 9ms store done 127.0.0.1 I8hydUG 127.0.0.1 I8hydUG n/a n/a 0 0.0% 0 0.0% 0 0 0 100.0% 0
---------------------------------------------------------------------------
In the above case, the source and target nodes are the same because the recovery
diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc
index 8eece07028..4d3acc7e49 100644
--- a/docs/reference/cat/shards.asciidoc
+++ b/docs/reference/cat/shards.asciidoc
@@ -10,9 +10,9 @@ Here we see a single index, with three primary shards and no replicas:
[source,sh]
--------------------------------------------------
% curl 192.168.56.20:9200/_cat/shards
-wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 Stiletto
-wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 Frankie Raye
-wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 Commander Kraken
+wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
+wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 bGG90GE
+wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 I8hydUG
--------------------------------------------------
[float]
@@ -26,9 +26,9 @@ some bandwidth by supplying an index pattern to the end.
[source,sh]
--------------------------------------------------
% curl 192.168.56.20:9200/_cat/shards/wiki*
-wiki2 0 p STARTED 197 3.2mb 192.168.56.10 Stiletto
-wiki2 1 p STARTED 205 5.9mb 192.168.56.30 Frankie Raye
-wiki2 2 p STARTED 275 7.8mb 192.168.56.20 Commander Kraken
+wiki2 0 p STARTED 197 3.2mb 192.168.56.10 H5dfFeA
+wiki2 1 p STARTED 205 5.9mb 192.168.56.30 bGG90GE
+wiki2 2 p STARTED 275 7.8mb 192.168.56.20 I8hydUG
--------------------------------------------------
@@ -44,8 +44,8 @@ shards. Where are they from and where are they going?
% curl 192.168.56.10:9200/_cat/health
1384315316 20:01:56 foo green 3 3 12 6 2 0 0
% curl 192.168.56.10:9200/_cat/shards | fgrep RELO
-wiki1 0 r RELOCATING 3014 31.1mb 192.168.56.20 Commander Kraken -> 192.168.56.30 Frankie Raye
-wiki1 1 r RELOCATING 3013 29.6mb 192.168.56.10 Stiletto -> 192.168.56.30 Frankie Raye
+wiki1 0 r RELOCATING 3014 31.1mb 192.168.56.20 I8hydUG -> 192.168.56.30 bGG90GE
+wiki1 1 r RELOCATING 3013 29.6mb 192.168.56.10 H5dfFeA -> 192.168.56.30 bGG90GE
--------------------------------------------------
[float]
@@ -60,12 +60,12 @@ Before a shard can be used, it goes through an `INITIALIZING` state.
% curl -XPUT 192.168.56.20:9200/_settings -d'{"number_of_replicas":1}'
{"acknowledged":true}
% curl 192.168.56.20:9200/_cat/shards
-wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 Stiletto
-wiki1 0 r INITIALIZING 0 14.3mb 192.168.56.30 Frankie Raye
-wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 Frankie Raye
-wiki1 1 r INITIALIZING 0 13.1mb 192.168.56.20 Commander Kraken
-wiki1 2 r INITIALIZING 0 14mb 192.168.56.10 Stiletto
-wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 Commander Kraken
+wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
+wiki1 0 r INITIALIZING 0 14.3mb 192.168.56.30 bGG90GE
+wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 bGG90GE
+wiki1 1 r INITIALIZING 0 13.1mb 192.168.56.20 I8hydUG
+wiki1 2 r INITIALIZING 0 14mb 192.168.56.10 H5dfFeA
+wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 I8hydUG
--------------------------------------------------
If a shard cannot be assigned, for example you've overallocated the
@@ -78,17 +78,17 @@ will remain `UNASSIGNED` with the <<reason-unassigned,reason code>> `ALLOCATION_
% curl 192.168.56.20:9200/_cat/health
1384316325 20:18:45 foo yellow 3 3 9 3 0 0 3
% curl 192.168.56.20:9200/_cat/shards
-wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 Stiletto
-wiki1 0 r STARTED 3014 31.1mb 192.168.56.30 Frankie Raye
-wiki1 0 r STARTED 3014 31.1mb 192.168.56.20 Commander Kraken
+wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
+wiki1 0 r STARTED 3014 31.1mb 192.168.56.30 bGG90GE
+wiki1 0 r STARTED 3014 31.1mb 192.168.56.20 I8hydUG
wiki1 0 r UNASSIGNED ALLOCATION_FAILED
-wiki1 1 r STARTED 3013 29.6mb 192.168.56.10 Stiletto
-wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 Frankie Raye
-wiki1 1 r STARTED 3013 29.6mb 192.168.56.20 Commander Kraken
+wiki1 1 r STARTED 3013 29.6mb 192.168.56.10 H5dfFeA
+wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 bGG90GE
+wiki1 1 r STARTED 3013 29.6mb 192.168.56.20 I8hydUG
wiki1 1 r UNASSIGNED ALLOCATION_FAILED
-wiki1 2 r STARTED 3973 38.1mb 192.168.56.10 Stiletto
-wiki1 2 r STARTED 3973 38.1mb 192.168.56.30 Frankie Raye
-wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 Commander Kraken
+wiki1 2 r STARTED 3973 38.1mb 192.168.56.10 H5dfFeA
+wiki1 2 r STARTED 3973 38.1mb 192.168.56.30 bGG90GE
+wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 I8hydUG
wiki1 2 r UNASSIGNED ALLOCATION_FAILED
--------------------------------------------------
diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc
new file mode 100644
index 0000000000..9a6dbead98
--- /dev/null
+++ b/docs/reference/cat/templates.asciidoc
@@ -0,0 +1,20 @@
+[[cat-templates]]
+== cat templates
+
+The `templates` command provides information about existing templates.
+
+[source,sh]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/templates?v=true'
+name template order version
+template0 te* 0
+template1 tea* 1
+template2 teak* 2 7
+--------------------------------------------------
+
+The output shows that there are three existing templates,
+with `template2` having a version value.
+
+The endpoint also supports giving a template name or pattern in the URL
+to filter the results, for example `/_cat/templates/template*` or
+`/_cat/templates/template0`.
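+
+For example, a sketch of filtering by name (reusing the sample templates above):
+
+[source,sh]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/templates/template0?v=true'
+name template order version
+template0 te* 0
+--------------------------------------------------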
diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc
index 7955239fc5..35f3d9975e 100644
--- a/docs/reference/cluster.asciidoc
+++ b/docs/reference/cluster.asciidoc
@@ -15,18 +15,19 @@ example, here are some sample executions of nodes info:
[source,js]
--------------------------------------------------
# Local
-curl localhost:9200/_nodes/_local
+GET /_nodes/_local
# Address
-curl localhost:9200/_nodes/10.0.0.3,10.0.0.4
-curl localhost:9200/_nodes/10.0.0.*
+GET /_nodes/10.0.0.3,10.0.0.4
+GET /_nodes/10.0.0.*
# Names
-curl localhost:9200/_nodes/node_name_goes_here
-curl localhost:9200/_nodes/node_name_goes_*
-# Attributes (set something like node.rack: 2 in the config)
-curl localhost:9200/_nodes/rack:2
-curl localhost:9200/_nodes/ra*:2
-curl localhost:9200/_nodes/ra*:2*
+GET /_nodes/node_name_goes_here
+GET /_nodes/node_name_goes_*
+# Attributes (set something like node.attr.rack: 2 in the config)
+GET /_nodes/rack:2
+GET /_nodes/ra*:2
+GET /_nodes/ra*:2*
--------------------------------------------------
+// CONSOLE
--
include::cluster/health.asciidoc[]
diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc
index ec223722c6..972a918b14 100644
--- a/docs/reference/cluster/allocation-explain.asciidoc
+++ b/docs/reference/cluster/allocation-explain.asciidoc
@@ -5,6 +5,8 @@ The cluster allocation explanation API is designed to assist in answering the
question "why is this shard unassigned?". To explain the allocation (on
unassigned state) of a shard, issue a request like:
+experimental[The cluster allocation explain API is new and should still be considered experimental. The API may change in ways that are not backwards compatible]
+
[source,js]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' -d'{
@@ -39,7 +41,7 @@ The response looks like:
"remaining_delay_ms" : 0, <5>
"nodes" : {
"V-Spi0AyRZ6ZvKbaI3691w" : {
- "node_name" : "node1",
+ "node_name" : "H5dfFeA",
"node_attributes" : { <6>
"bar" : "baz"
},
@@ -56,7 +58,7 @@ The response looks like:
} ]
},
"Qc6VL8c5RWaw1qXZ0Rg57g" : {
- "node_name" : "node2",
+ "node_name" : "bGG90GE",
"node_attributes" : {
"bar" : "baz",
"foo" : "bar"
@@ -74,7 +76,7 @@ The response looks like:
} ]
},
"PzdyMZGXQdGhqTJHF_hGgA" : {
- "node_name" : "node3",
+ "node_name" : "DKDM97B",
"node_attributes" : { },
"store" : {
"shard_copy" : "NONE"
@@ -120,7 +122,7 @@ For a shard that is already assigned, the output looks similar to:
"remaining_delay_ms" : 0,
"nodes" : {
"V-Spi0AyRZ6ZvKbaI3691w" : {
- "node_name" : "Susan Storm",
+ "node_name" : "bGG90GE",
"node_attributes" : {
"bar" : "baz"
},
@@ -137,7 +139,7 @@ For a shard that is already assigned, the output looks similar to:
} ]
},
"Qc6VL8c5RWaw1qXZ0Rg57g" : {
- "node_name" : "Slipstream",
+ "node_name" : "I8hydUG",
"node_attributes" : {
"bar" : "baz",
"foo" : "bar"
@@ -155,7 +157,7 @@ For a shard that is already assigned, the output looks similar to:
} ]
},
"PzdyMZGXQdGhqTJHF_hGgA" : {
- "node_name" : "The Symbiote",
+ "node_name" : "H5dfFeA",
"node_attributes" : { },
"store" : {
"shard_copy" : "NONE"
diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc
index 89806011b9..dc73b4408e 100644
--- a/docs/reference/cluster/health.asciidoc
+++ b/docs/reference/cluster/health.asciidoc
@@ -13,6 +13,8 @@ GET _cluster/health
// TEST[s/^/PUT test1\n/]
Returns this:
+
+[source,js]
--------------------------------------------------
{
"cluster_name" : "testcluster",
@@ -82,14 +84,14 @@ The cluster health API accepts the following request parameters:
provided or better, i.e. `green` > `yellow` > `red`. By default, will not
wait for any status.
-`wait_for_relocating_shards`::
- A number controlling to how many relocating
- shards to wait for. Usually will be `0` to indicate to wait till all
- relocations have happened. Defaults to not wait.
+`wait_for_no_relocating_shards`::
+ A boolean value which controls whether to wait (until the timeout provided)
+ for the cluster to have no shard relocations. Defaults to false, which means
+ it will not wait for relocating shards.
`wait_for_active_shards`::
- A number controlling to how many active
- shards to wait for. Defaults to not wait.
+ A number controlling how many active shards to wait for, `all` to wait
+ for all shards in the cluster to be active, or `0` to not wait. Defaults to `0`.
`wait_for_nodes`::
The request waits until the specified number `N` of
diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc
index 4758ea2b0c..a95273f85f 100644
--- a/docs/reference/cluster/stats.asciidoc
+++ b/docs/reference/cluster/stats.asciidoc
@@ -116,7 +116,17 @@ Will return, for example:
"name": "Mac OS X",
"count": 1
}
- ]
+ ],
+ "mem" : {
+ "total" : "16gb",
+ "total_in_bytes" : 17179869184,
+ "free" : "78.1mb",
+ "free_in_bytes" : 81960960,
+ "used" : "15.9gb",
+ "used_in_bytes" : 17097908224,
+ "free_percent" : 0,
+ "used_percent" : 100
+ }
},
"process": {
"cpu": {
diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc
index 5720227f9f..dec4fb8f69 100644
--- a/docs/reference/cluster/tasks.asciidoc
+++ b/docs/reference/cluster/tasks.asciidoc
@@ -28,7 +28,7 @@ The result will look similar to the following:
{
"nodes" : {
"oTUltX4IQMOUUVeiohTt8A" : {
- "name" : "Tamara Rahn",
+ "name" : "H5dfFeA",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1:9300",
diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc
index b139e76149..407272a79e 100644
--- a/docs/reference/docs/bulk.asciidoc
+++ b/docs/reference/docs/bulk.asciidoc
@@ -66,17 +66,92 @@ example of a correct sequence of bulk commands:
[source,js]
--------------------------------------------------
+POST _bulk
{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
{ "field1" : "value1" }
{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } }
{ "field1" : "value3" }
-{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1"} }
+{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "test"} }
{ "doc" : {"field2" : "value2"} }
--------------------------------------------------
+// CONSOLE
-In the above example `doc` for the `update` action is a partial
-document, that will be merged with the already stored document.
+The result of this bulk operation is:
+
+[source,js]
+--------------------------------------------------
+{
+ "took": 30,
+ "errors": false,
+ "items": [
+ {
+ "index": {
+ "_index": "test",
+ "_type": "type1",
+ "_id": "1",
+ "_version": 1,
+ "result": "created",
+ "_shards": {
+ "total": 2,
+ "successful": 1,
+ "failed": 0
+ },
+ "created": true,
+ "status": 201
+ }
+ },
+ {
+ "delete": {
+ "found": false,
+ "_index": "test",
+ "_type": "type1",
+ "_id": "2",
+ "_version": 1,
+ "result": "not_found",
+ "_shards": {
+ "total": 2,
+ "successful": 1,
+ "failed": 0
+ },
+ "status": 404
+ }
+ },
+ {
+ "create": {
+ "_index": "test",
+ "_type": "type1",
+ "_id": "3",
+ "_version": 1,
+ "result": "created",
+ "_shards": {
+ "total": 2,
+ "successful": 1,
+ "failed": 0
+ },
+ "created": true,
+ "status": 201
+ }
+ },
+ {
+ "update": {
+ "_index": "test",
+ "_type": "type1",
+ "_id": "1",
+ "_version": 2,
+ "result": "updated",
+ "_shards": {
+ "total": 2,
+ "successful": 1,
+ "failed": 0
+ },
+ "status": 200
+ }
+ }
+ ]
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"took": 30/"took": $body.took/ s/"index_uuid": .../"index_uuid": $body.items.3.update.error.index_uuid/]
The endpoints are `/_bulk`, `/{index}/_bulk`, and `{index}/{type}/_bulk`.
When the index or the index/type are provided, they will be used by
@@ -154,22 +229,25 @@ times an update should be retried in the case of a version conflict.
The `update` action payload, supports the following options: `doc`
(partial document), `upsert`, `doc_as_upsert`, `script`, `params` (for
-script), `lang` (for script) and `fields`. See update documentation for details on
-the options. Curl example with update actions:
+script), `lang` (for script) and `_source`. See update documentation for details on
+the options. Example with update actions:
[source,js]
--------------------------------------------------
+POST _bulk
{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
{ "doc" : {"field" : "value"} }
{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
{ "script" : { "inline": "ctx._source.counter += params.param1", "lang" : "painless", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}}
{ "update" : {"_id" : "2", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
{ "doc" : {"field" : "value"}, "doc_as_upsert" : true }
-{ "update" : {"_id" : "3", "_type" : "type1", "_index" : "index1", "fields" : ["_source"]} }
+{ "update" : {"_id" : "3", "_type" : "type1", "_index" : "index1", "_source" : true} }
{ "doc" : {"field" : "value"} }
{ "update" : {"_id" : "4", "_type" : "type1", "_index" : "index1"} }
-{ "doc" : {"field" : "value"}, "fields": ["_source"]}
+{ "doc" : {"field" : "value"}, "_source": true}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
[float]
[[bulk-security]]
diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc
index 08bfa3318f..468f454562 100644
--- a/docs/reference/docs/delete-by-query.asciidoc
+++ b/docs/reference/docs/delete-by-query.asciidoc
@@ -160,7 +160,7 @@ to keep or remove as you see fit. When you are done with it, delete it so
Elasticsearch can reclaim the space it uses.
`wait_for_active_shards` controls how many copies of a shard must be active
-before proceeding with the request. See <<index-wait-for-active-shards,here>>
+before proceeding with the request. See <<index-wait-for-active-shards,here>>
for details. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
<<docs-bulk,Bulk API>>.
@@ -244,7 +244,7 @@ The responses looks like:
{
"nodes" : {
"r1A2WoRbTwKZ516z6NEs5A" : {
- "name" : "Tyrannus",
+ "name" : "r1A2WoR",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1:9300",
@@ -314,7 +314,7 @@ POST _tasks/taskid:1/_cancel
The `task_id` can be found using the tasks API above.
-Cancelation should happen quickly but might take a few seconds. The task status
+Cancellation should happen quickly but might take a few seconds. The task status
API above will continue to list the task until it wakes to cancel itself.
@@ -339,3 +339,74 @@ like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the
query takes effect immediately, but rethrottling that slows down the query will
take effect after completing the current batch. This prevents scroll
timeouts.
+
+[float]
+=== Manually slicing
+
+Delete-by-query supports <<sliced-scroll>> allowing you to manually parallelize
+the process relatively easily:
+
+[source,js]
+----------------------------------------------------------------
+POST twitter/_delete_by_query
+{
+ "slice": {
+ "id": 0,
+ "max": 2
+ },
+ "query": {
+ "range": {
+ "likes": {
+ "lt": 10
+ }
+ }
+ }
+}
+POST twitter/_delete_by_query
+{
+ "slice": {
+ "id": 1,
+ "max": 2
+ },
+ "query": {
+ "range": {
+ "likes": {
+ "lt": 10
+ }
+ }
+ }
+}
+----------------------------------------------------------------
+// CONSOLE
+// TEST[setup:big_twitter]
+
+Which you can verify works with:
+
+[source,js]
+----------------------------------------------------------------
+GET _refresh
+POST twitter/_search?size=0&filter_path=hits.total
+{
+ "query": {
+ "range": {
+ "likes": {
+ "lt": 10
+ }
+ }
+ }
+}
+----------------------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+Which results in a sensible `total` like this one:
+
+[source,js]
+----------------------------------------------------------------
+{
+ "hits": {
+ "total": 0
+ }
+}
+----------------------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc
index 1d6544cdd9..bc6ff9f1a2 100644
--- a/docs/reference/docs/get.asciidoc
+++ b/docs/reference/docs/get.asciidoc
@@ -3,12 +3,14 @@
The get API allows to get a typed JSON document from the index based on
its id. The following example gets a JSON document from an index called
-twitter, under a type called tweet, with id valued 1:
+twitter, under a type called tweet, with id 0:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1'
+GET twitter/tweet/0
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The result of the above get operation is:
@@ -17,16 +19,18 @@ The result of the above get operation is:
{
"_index" : "twitter",
"_type" : "tweet",
- "_id" : "1",
+ "_id" : "0",
"_version" : 1,
"found": true,
"_source" : {
"user" : "kimchy",
- "postDate" : "2009-11-15T14:12:12",
+ "date" : "2009-11-15T14:12:12",
+ "likes": 0,
"message" : "trying out Elasticsearch"
}
}
--------------------------------------------------
+// TESTRESPONSE
The above result includes the `_index`, `_type`, `_id` and `_version`
of the document we wish to retrieve, including the actual `_source`
@@ -38,25 +42,21 @@ The API also allows to check for the existence of a document using
[source,js]
--------------------------------------------------
-curl -XHEAD -i 'http://localhost:9200/twitter/tweet/1'
+HEAD twitter/tweet/0
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
[float]
[[realtime]]
=== Realtime
By default, the get API is realtime, and is not affected by the refresh
-rate of the index (when data will become visible for search). In order
-to disable realtime GET, one can set `realtime` parameter to `false`.
-
-When getting a document, one can specify `fields` to fetch from it. They
-will, when possible, be fetched as stored fields (fields mapped as
-stored in the mapping). When using realtime GET, there is no notion of
-stored fields (at least for a period of time, basically, until the next
-flush), so they will be extracted from the source itself (note, even if
-source is not enabled). It is a good practice to assume that the fields
-will be loaded from source when using realtime GET, even if the fields
-are stored.
+rate of the index (when data will become visible for search). If a document
+has been updated but is not yet refreshed, the get API will issue a refresh
+call in-place to make the document visible. This will also make other documents
+changed since the last refresh visible. In order to disable realtime GET,
+one can set the `realtime` parameter to `false`.
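+
+A minimal sketch of disabling realtime for a single get (reusing the example tweet):
+
+[source,js]
+--------------------------------------------------
+GET twitter/tweet/0?realtime=false
+--------------------------------------------------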
[float]
[[type]]
@@ -71,13 +71,15 @@ to fetch the first document matching the id across all types.
=== Source filtering
By default, the get operation returns the contents of the `_source` field unless
-you have used the `fields` parameter or if the `_source` field is disabled.
+you have used the `stored_fields` parameter or if the `_source` field is disabled.
You can turn off `_source` retrieval by using the `_source` parameter:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1?_source=false'
+GET twitter/tweet/0?_source=false
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
If you only need one or two fields from the complete `_source`, you can use the `_source_include`
& `_source_exclude` parameters to include or filter out that parts you need. This can be especially helpful
@@ -86,37 +88,137 @@ of fields or wildcard expressions. Example:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1?_source_include=*.id&_source_exclude=entities'
+GET twitter/tweet/0?_source_include=*.id&_source_exclude=entities
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
If you only want to specify includes, you can use a shorter notation:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1?_source=*.id,retweeted'
+GET twitter/tweet/0?_source=*.id,retweeted
--------------------------------------------------
-
+// CONSOLE
+// TEST[setup:twitter]
[float]
-[[get-fields]]
-=== Fields
+[[get-stored-fields]]
+=== Stored Fields
The get operation allows specifying a set of stored fields that will be
-returned by passing the `fields` parameter. For example:
+returned by passing the `stored_fields` parameter.
+If the requested fields are not stored, they will be ignored.
+Consider for instance the following mapping:
+
+[source,js]
+--------------------------------------------------
+PUT twitter
+{
+ "mappings": {
+ "tweet": {
+ "properties": {
+ "counter": {
+ "type": "integer",
+ "store": false
+ },
+ "tags": {
+ "type": "keyword",
+ "store": true
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE
+
+Now we can add a document:
+
+[source,js]
+--------------------------------------------------
+PUT twitter/tweet/1
+{
+ "counter" : 1,
+ "tags" : ["red"]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+... and try to retrieve it:
+
+[source,js]
+--------------------------------------------------
+GET twitter/tweet/1?stored_fields=tags,counter
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The result of the above get operation is:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1?fields=title,content'
+{
+ "_index": "twitter",
+ "_type": "tweet",
+ "_id": "1",
+ "_version": 1,
+ "found": true,
+ "fields": {
+ "tags": [
+ "red"
+ ]
+ }
+}
--------------------------------------------------
+// TESTRESPONSE
+
-For backward compatibility, if the requested fields are not stored, they will be fetched
-from the `_source` (parsed and extracted). This functionality has been replaced by the
-<<get-source-filtering,source filtering>> parameter.
+Field values fetched from the document itself are always returned as an array.
+Since the `counter` field is not stored, the get request simply ignores it when trying to get the `stored_fields`.
-Field values fetched from the document it self are always returned as an array. Metadata fields like `_routing` and
-`_parent` fields are never returned as an array.
+It is also possible to retrieve metadata fields like `_routing` and `_parent` fields:
+
+[source,js]
+--------------------------------------------------
+PUT twitter/tweet/2?routing=user1
+{
+ "counter" : 1,
+ "tags" : ["white"]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+[source,js]
+--------------------------------------------------
+GET twitter/tweet/2?routing=user1&stored_fields=tags,counter
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The result of the above get operation is:
+
+[source,js]
+--------------------------------------------------
+{
+ "_index": "twitter",
+ "_type": "tweet",
+ "_id": "2",
+ "_version": 1,
+ "_routing": "user1",
+ "found": true,
+ "fields": {
+ "tags": [
+ "white"
+ ]
+ }
+}
+--------------------------------------------------
+// TESTRESPONSE
-Also only leaf fields can be returned via the `field` option. So object fields can't be returned and such requests
+Also, only leaf fields can be returned via the `stored_fields` option. Object fields can't be returned and such requests
will fail.
[float]
@@ -136,24 +238,29 @@ without any additional content around it. For example:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1/_source'
+GET twitter/tweet/1/_source
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
You can also use the same source filtering parameters to control which parts of the `_source` will be returned:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1/_source?_source_include=*.id&_source_exclude=entities'
+GET twitter/tweet/1/_source?_source_include=*.id&_source_exclude=entities
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
Note that there is also a HEAD variant for the _source endpoint to efficiently test for document _source existence.
An existing document will not have a _source if it is disabled in the <<mapping-source-field,mapping>>.
-Curl example:
[source,js]
--------------------------------------------------
-curl -XHEAD -i 'http://localhost:9200/twitter/tweet/1/_source'
+HEAD twitter/tweet/1/_source
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
[float]
[[get-routing]]
@@ -164,10 +271,12 @@ a document, the routing value should also be provided. For example:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/tweet/1?routing=kimchy'
+GET twitter/tweet/2?routing=user1
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
-The above will get a tweet with id 1, but will be routed based on the
+The above will get a tweet with id 2, but will be routed based on the
user. Note that issuing a get without the correct routing will cause the
document not to be fetched.
diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc
index eb9b425219..57453b546a 100644
--- a/docs/reference/docs/index_.asciidoc
+++ b/docs/reference/docs/index_.asciidoc
@@ -40,7 +40,7 @@ The `_shards` header provides information about the replication process of the i
* `total` - Indicates to how many shard copies (primary and replica shards) the index operation should be executed on.
* `successful`- Indicates the number of shard copies the index operation succeeded on.
-* `failures` - An array that contains replication related errors in the case an index operation failed on a replica shard.
+* `failed` - An array that contains replication-related errors in the case an index operation failed on a replica shard.
The index operation is successful if `successful` is at least 1.
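
For reference, a sketch of the `_shards` header as it appears in an index response
(values mirror the examples elsewhere in these docs):

[source,js]
--------------------------------------------------
"_shards" : {
    "total" : 2,
    "successful" : 1,
    "failed" : 0
}
--------------------------------------------------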
diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc
index 15dec9aea2..21693cc514 100644
--- a/docs/reference/docs/multi-get.asciidoc
+++ b/docs/reference/docs/multi-get.asciidoc
@@ -155,7 +155,7 @@ curl 'localhost:9200/_mget' -d '{
[[mget-fields]]
=== Fields
-Specific stored fields can be specified to be retrieved per document to get, similar to the <<get-fields,fields>> parameter of the Get API.
+Specific stored fields can be specified to be retrieved per document to get, similar to the <<get-stored-fields,stored_fields>> parameter of the Get API.
For example:
[source,js]
@@ -166,31 +166,31 @@ curl 'localhost:9200/_mget' -d '{
"_index" : "test",
"_type" : "type",
"_id" : "1",
- "fields" : ["field1", "field2"]
+ "stored_fields" : ["field1", "field2"]
},
{
"_index" : "test",
"_type" : "type",
"_id" : "2",
- "fields" : ["field3", "field4"]
+ "stored_fields" : ["field3", "field4"]
}
]
}'
--------------------------------------------------
-Alternatively, you can specify the `fields` parameter in the query string
+Alternatively, you can specify the `stored_fields` parameter in the query string
as a default to be applied to all documents.
[source,js]
--------------------------------------------------
-curl 'localhost:9200/test/type/_mget?fields=field1,field2' -d '{
+curl 'localhost:9200/test/type/_mget?stored_fields=field1,field2' -d '{
"docs" : [
{
"_id" : "1" <1>
},
{
"_id" : "2",
- "fields" : ["field3", "field4"] <2>
+ "stored_fields" : ["field3", "field4"] <2>
}
]
}'
@@ -201,7 +201,7 @@ curl 'localhost:9200/test/type/_mget?fields=field1,field2' -d '{
[float]
=== Generated fields
-See <<generated-fields>> for fields are generated only when indexing.
+See <<generated-fields>> for fields generated only when indexing.
[float]
[[mget-routing]]
diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc
index 94ac05446b..46786c0b6d 100644
--- a/docs/reference/docs/reindex.asciidoc
+++ b/docs/reference/docs/reindex.asciidoc
@@ -424,7 +424,7 @@ supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`,
Sending the `refresh` url parameter will cause all indexes to which the request
wrote to be refreshed. This is different from the Index API's `refresh`
-parameter which causes just the shard that received the new data to be indexed.
+parameter which causes just the shard that received the new data to be refreshed.
If the request contains `wait_for_completion=false` then Elasticsearch will
perform some preflight checks, launch the request, and then return a `task`
@@ -435,7 +435,7 @@ to keep or remove as you see fit. When you are done with it, delete it so
Elasticsearch can reclaim the space it uses.
`wait_for_active_shards` controls how many copies of a shard must be active
-before proceeding with the reindexing. See <<index-wait-for-active-shards,here>>
+before proceeding with the reindexing. See <<index-wait-for-active-shards,here>>
for details. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
<<docs-bulk,Bulk API>>.
@@ -528,7 +528,7 @@ The responses looks like:
{
"nodes" : {
"r1A2WoRbTwKZ516z6NEs5A" : {
- "name" : "Tyrannus",
+ "name" : "r1A2WoR",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1:9300",
@@ -628,6 +628,7 @@ take effect on after completing the current batch. This prevents scroll
timeouts.
[float]
+[[docs-reindex-change-name]]
=== Reindex to change the name of a field
`_reindex` can be used to build a copy of an index with renamed fields. Say you
@@ -692,3 +693,120 @@ and it'll look like:
// TESTRESPONSE
Or you can search by `tag` or whatever you want.
+
+[float]
+=== Manually slicing
+
+Reindex supports <<sliced-scroll>> allowing you to manually parallelize the
+process relatively easily:
+
+[source,js]
+----------------------------------------------------------------
+POST _reindex
+{
+ "source": {
+ "index": "twitter",
+ "slice": {
+ "id": 0,
+ "max": 2
+ }
+ },
+ "dest": {
+ "index": "new_twitter"
+ }
+}
+POST _reindex
+{
+ "source": {
+ "index": "twitter",
+ "slice": {
+ "id": 1,
+ "max": 2
+ }
+ },
+ "dest": {
+ "index": "new_twitter"
+ }
+}
+----------------------------------------------------------------
+// CONSOLE
+// TEST[setup:big_twitter]
+
+Which you can verify works with:
+
+[source,js]
+----------------------------------------------------------------
+GET _refresh
+POST new_twitter/_search?size=0&filter_path=hits.total
+----------------------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+Which results in a sensible `total` like this one:
+
+[source,js]
+----------------------------------------------------------------
+{
+ "hits": {
+ "total": 120
+ }
+}
+----------------------------------------------------------------
+// TESTRESPONSE
+
+[float]
+=== Reindex daily indices
+
+You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
+to reindex daily indices to apply a new template to the existing documents.
+
+Assuming you have indices consisting of documents as follows:
+
+[source,js]
+----------------------------------------------------------------
+PUT metricbeat-2016.05.30/beat/1?refresh
+{"system.cpu.idle.pct": 0.908}
+PUT metricbeat-2016.05.31/beat/1?refresh
+{"system.cpu.idle.pct": 0.105}
+----------------------------------------------------------------
+// CONSOLE
+
+The new template for the `metricbeat-*` indices is already loaded into Elasticsearch,
+but it applies only to the newly created indices. Painless can be used to reindex
+the existing documents and apply the new template.
+
+The script below extracts the date from the index name and creates a new index
+with `-1` appended. All data from `metricbeat-2016.05.31` will be reindexed
+into `metricbeat-2016.05.31-1`.
+
+[source,js]
+----------------------------------------------------------------
+POST _reindex
+{
+ "source": {
+ "index": "metricbeat-*"
+ },
+ "dest": {
+ "index": "metricbeat"
+ },
+ "script": {
+ "lang": "painless",
+ "inline": "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'"
+ }
+}
+----------------------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+All documents from the previous metricbeat indices can now be found in the `*-1` indices.
+
+[source,js]
+----------------------------------------------------------------
+GET metricbeat-2016.05.30-1/beat/1
+GET metricbeat-2016.05.31-1/beat/1
+----------------------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The previous method can also be used in combination with <<docs-reindex-change-name, change the name of a field>>
+to not only load the existing data into the new index but also rename fields if needed.
diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc
index 7d1f74be83..7299d398e6 100644
--- a/docs/reference/docs/update-by-query.asciidoc
+++ b/docs/reference/docs/update-by-query.asciidoc
@@ -217,7 +217,7 @@ to keep or remove as you see fit. When you are done with it, delete it so
Elasticsearch can reclaim the space it uses.
`wait_for_active_shards` controls how many copies of a shard must be active
-before proceeding with the request. See <<index-wait-for-active-shards,here>>
+before proceeding with the request. See <<index-wait-for-active-shards,here>>
for details. `timeout` controls how long each write request waits for unavailable
shards to become available. Both work exactly how they work in the
<<docs-bulk,Bulk API>>.
@@ -306,7 +306,7 @@ The responses looks like:
{
"nodes" : {
"r1A2WoRbTwKZ516z6NEs5A" : {
- "name" : "Tyrannus",
+ "name" : "r1A2WoR",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1:9300",
@@ -379,7 +379,7 @@ POST _tasks/taskid:1/_cancel
The `task_id` can be found using the tasks API above.
-Cancelation should happen quickly but might take a few seconds. The task status
+Cancellation should happen quickly but might take a few seconds. The task status
API above will continue to list the task until it wakes to cancel itself.
@@ -406,6 +406,60 @@ take effect on after completing the current batch. This prevents scroll
timeouts.
[float]
+=== Manually slicing
+
+Update-by-query supports <<sliced-scroll>> allowing you to manually parallelize
+the process relatively easily:
+
+[source,js]
+----------------------------------------------------------------
+POST twitter/_update_by_query
+{
+ "slice": {
+ "id": 0,
+ "max": 2
+ },
+ "script": {
+ "inline": "ctx._source['extra'] = 'test'"
+ }
+}
+POST twitter/_update_by_query
+{
+ "slice": {
+ "id": 1,
+ "max": 2
+ },
+ "script": {
+ "inline": "ctx._source['extra'] = 'test'"
+ }
+}
+----------------------------------------------------------------
+// CONSOLE
+// TEST[setup:big_twitter]
+
+Which you can verify works with:
+
+[source,js]
+----------------------------------------------------------------
+GET _refresh
+POST twitter/_search?size=0&q=extra:test&filter_path=hits.total
+----------------------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+Which results in a sensible `total` like this one:
+
+[source,js]
+----------------------------------------------------------------
+{
+ "hits": {
+ "total": 120
+ }
+}
+----------------------------------------------------------------
+// TESTRESPONSE
+
+[float]
[[picking-up-a-new-property]]
=== Pick up a new property
diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc
index 28e33cb621..ff4c4c657d 100644
--- a/docs/reference/docs/update.asciidoc
+++ b/docs/reference/docs/update.asciidoc
@@ -17,11 +17,13 @@ For example, lets index a simple doc:
[source,js]
--------------------------------------------------
-curl -XPUT localhost:9200/test/type1/1 -d '{
+PUT test/type1/1
+{
"counter" : 1,
"tags" : ["red"]
-}'
+}
--------------------------------------------------
+// CONSOLE
[float]
=== Scripted updates
@@ -30,7 +32,8 @@ Now, we can execute a script that would increment the counter:
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+POST test/type1/1/_update
+{
"script" : {
"inline": "ctx._source.counter += params.count",
"lang": "painless",
@@ -38,24 +41,29 @@ curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
"count" : 4
}
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
We can add a tag to the list of tags (note, if the tag exists, it
will still add it, since it's a list):
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+POST test/type1/1/_update
+{
"script" : {
- "inline": "ctx._source.tags += params.tag",
+ "inline": "ctx._source.tags.add(params.tag)",
"lang": "painless",
"params" : {
"tag" : "blue"
}
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
In addition to `_source`, the following variables are available through
the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`,
@@ -65,36 +73,45 @@ We can also add a new field to the document:
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
- "script" : "ctx._source.name_of_new_field = \"value_of_new_field\""
-}'
+POST test/type1/1/_update
+{
+ "script" : "ctx._source.new_field = \"value_of_new_field\""
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
Or remove a field from the document:
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
- "script" : "ctx._source.remove(\"name_of_field\")"
-}'
+POST test/type1/1/_update
+{
+ "script" : "ctx._source.remove(\"new_field\")"
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
And, we can even change the operation that is executed. This example deletes
-the doc if the `tags` field contain `blue`, otherwise it does nothing
+the doc if the `tags` field contains `green`, otherwise it does nothing
(`noop`):
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+POST test/type1/1/_update
+{
"script" : {
- "inline": "ctx._source.tags.contains(params.tag) ? ctx.op = \"delete\" : ctx.op = \"none\"",
+ "inline": "if (ctx._source.tags.contains(params.tag)) { ctx.op = \"delete\" } else { ctx.op = \"none\" }",
"lang": "painless",
"params" : {
- "tag" : "blue"
+ "tag" : "green"
}
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
[float]
=== Updates with a partial document
@@ -106,31 +123,36 @@ example:
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+POST test/type1/1/_update
+{
"doc" : {
"name" : "new_name"
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
If both `doc` and `script` are specified, then `doc` is ignored. It is best
to put the field pairs of the partial document in the script itself.
[float]
=== Detecting noop updates
-If `doc` is specified its value is merged with the existing `_source`. By
-default the document is only reindexed if the new `_source` field differs from
-the old. Setting `detect_noop` to `false` will cause Elasticsearch to always
-update the document even if it hasn't changed. For example:
+
+If `doc` is specified, its value is merged with the existing `_source`.
+By default, updates that don't change anything are detected as no-ops and return `"result": "noop"`, like this:
+
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+POST test/type1/1/_update
+{
"doc" : {
"name" : "new_name"
- },
- "detect_noop": false
-}'
+ }
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
If `name` was `new_name` before the request was sent then the entire update
request is ignored. The `result` element in the response returns `noop` if
@@ -139,13 +161,34 @@ the request was ignored.
[source,js]
--------------------------------------------------
{
+ "_shards": {
+ "total": 0,
+ "successful": 0,
+ "failed": 0
+ },
"_index": "test",
"_type": "type1",
"_id": "1",
- "_version": 1,
+ "_version": 6,
"result": noop
}
--------------------------------------------------
+// TESTRESPONSE
+
+You can disable this behavior by setting `"detect_noop": false`, like this:
+
+[source,js]
+--------------------------------------------------
+POST test/type1/1/_update
+{
+ "doc" : {
+ "name" : "new_name"
+ },
+ "detect_noop": true
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
[[upserts]]
[float]
@@ -157,7 +200,8 @@ will be inserted as a new document. If the document does exist, then the
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+POST test/type1/1/_update
+{
"script" : {
"inline": "ctx._source.counter += params.count",
"lang": "painless",
@@ -168,8 +212,10 @@ curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
"upsert" : {
"counter" : 1
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
[float]
==== `scripted_upsert`
@@ -180,7 +226,8 @@ or not -- i.e. the script handles initializing the document instead of the
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/sessions/session/dh3sgudg8gsrgl/_update' -d '{
+POST sessions/session/dh3sgudg8gsrgl/_update
+{
"scripted_upsert":true,
"script" : {
"id": "my_web_session_summariser",
@@ -193,7 +240,7 @@ curl -XPOST 'localhost:9200/sessions/session/dh3sgudg8gsrgl/_update' -d '{
}
},
"upsert" : {}
-}'
+}
--------------------------------------------------
[float]
@@ -205,13 +252,16 @@ value:
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+POST test/type1/1/_update
+{
"doc" : {
"name" : "new_name"
},
"doc_as_upsert" : true
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
[float]
@@ -247,7 +297,7 @@ Timeout waiting for a shard to become available.
`wait_for_active_shards`::
-The number of shard copies required to be active before proceeding with the update operation.
+The number of shard copies required to be active before proceeding with the update operation.
See <<index-wait-for-active-shards,here>> for details.
`refresh`::
@@ -255,10 +305,12 @@ See <<index-wait-for-active-shards,here>> for details.
Control when the changes made by this request are visible to search. See
<<docs-refresh>>.
-`fields`::
+`_source`::
+
+Allows you to control whether and how the updated source is returned in the response.
+By default the updated source is not returned.
+See <<search-request-source-filtering, `source filtering`>> for details.
-Return the relevant fields from the updated document. Specify `_source` to
-return the full updated source.
`version` & `version_type`::
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc
index fa257742bb..a0239d576a 100755
--- a/docs/reference/getting-started.asciidoc
+++ b/docs/reference/getting-started.asciidoc
@@ -40,7 +40,8 @@ Note that it is valid and perfectly fine to have a cluster with only a single no
[float]
=== Node
-A node is a single server that is part of your cluster, stores your data, and participates in the cluster's indexing and search capabilities. Just like a cluster, a node is identified by a name which by default is a random Marvel character name that is assigned to the node at startup. You can define any node name you want if you do not want the default. This name is important for administration purposes where you want to identify which servers in your network correspond to which nodes in your Elasticsearch cluster.
+A node is a single server that is part of your cluster, stores your data, and participates in the cluster's indexing and search
+capabilities. Just like a cluster, a node is identified by a name which by default is a random Universally Unique IDentifier (UUID) that is assigned to the node at startup. You can define any node name you want if you do not want the default. This name is important for administration purposes where you want to identify which servers in your network correspond to which nodes in your Elasticsearch cluster.
A node can be configured to join a specific cluster by the cluster name. By default, each node is set up to join a cluster named `elasticsearch` which means that if you start up a number of nodes on your network and--assuming they can discover each other--they will all automatically form and join a single cluster named `elasticsearch`.
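
As a sketch (the values here are placeholders), both names can be set in
`config/elasticsearch.yml`:

[source,yaml]
--------------------------------------------------
cluster.name: my-application
node.name: node-1
--------------------------------------------------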
@@ -115,8 +116,9 @@ Let's download the Elasticsearch {version} tar as follows (Windows users should
["source","sh",subs="attributes,callouts"]
--------------------------------------------------
-curl -L -O https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/{version}/elasticsearch-{version}.tar.gz
+curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz
--------------------------------------------------
+// NOTCONSOLE
Then extract it as follows (Windows users should unzip the zip package):
@@ -143,21 +145,33 @@ If everything goes well, you should see a bunch of messages that look like below
["source","sh",subs="attributes,callouts"]
--------------------------------------------------
-./elasticsearch
-[2014-03-13 13:42:17,218][INFO ][node ] [New Goblin] version[{version}], pid[2085], build[5c03844/2014-02-25T15:52:53Z]
-[2014-03-13 13:42:17,219][INFO ][node ] [New Goblin] initializing ...
-[2014-03-13 13:42:17,223][INFO ][plugins ] [New Goblin] loaded [], sites []
-[2014-03-13 13:42:19,831][INFO ][node ] [New Goblin] initialized
-[2014-03-13 13:42:19,832][INFO ][node ] [New Goblin] starting ...
-[2014-03-13 13:42:19,958][INFO ][transport ] [New Goblin] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/192.168.8.112:9300]}
-[2014-03-13 13:42:23,030][INFO ][cluster.service] [New Goblin] new_master [New Goblin][rWMtGj3dQouz2r6ZFL9v4g][mwubuntu1][inet[/192.168.8.112:9300]], reason: zen-disco-join (elected_as_master)
-[2014-03-13 13:42:23,100][INFO ][discovery ] [New Goblin] elasticsearch/rWMtGj3dQouz2r6ZFL9v4g
-[2014-03-13 13:42:23,125][INFO ][http ] [New Goblin] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/192.168.8.112:9200]}
-[2014-03-13 13:42:23,629][INFO ][gateway ] [New Goblin] recovered [1] indices into cluster_state
-[2014-03-13 13:42:23,630][INFO ][node ] [New Goblin] started
---------------------------------------------------
-
-Without going too much into detail, we can see that our node named "New Goblin" (which will be a different Marvel character in your case) has started and elected itself as a master in a single cluster. Don't worry yet at the moment what master means. The main thing that is important here is that we have started one node within one cluster.
+[2016-09-16T14:17:51,251][INFO ][o.e.n.Node ] [] initializing ...
+[2016-09-16T14:17:51,329][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [317.7gb], net total_space [453.6gb], spins? [no], types [ext4]
+[2016-09-16T14:17:51,330][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] heap size [1.9gb], compressed ordinary object pointers [true]
+[2016-09-16T14:17:51,333][INFO ][o.e.n.Node ] [6-bjhwl] node name [6-bjhwl] derived from node ID; set [node.name] to override
+[2016-09-16T14:17:51,334][INFO ][o.e.n.Node ] [6-bjhwl] version[{version}], pid[21261], build[f5daa16/2016-09-16T09:12:24.346Z], OS[Linux/4.4.0-36-generic/amd64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_60/25.60-b23]
+[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [aggs-matrix-stats]
+[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [ingest-common]
+[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-expression]
+[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-groovy]
+[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-mustache]
+[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-painless]
+[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [percolator]
+[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [reindex]
+[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty3]
+[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty4]
+[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded plugin [mapper-murmur3]
+[2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] initialized
+[2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] starting ...
+[2016-09-16T14:17:53,671][INFO ][o.e.t.TransportService ] [6-bjhwl] publish_address {192.168.8.112:9300}, bound_addresses {{192.168.8.112:9300}
+[2016-09-16T14:17:53,676][WARN ][o.e.b.BootstrapCheck ] [6-bjhwl] max virtual memory areas vm.max_map_count [65530] likely too low, increase to at least [262144]
+[2016-09-16T14:17:56,718][INFO ][o.e.c.s.ClusterService ] [6-bjhwl] new_master {6-bjhwl}{6-bjhwl4TkajjoD2oEipnQ}{8m3SNKoFR6yQl1I0JUfPig}{192.168.8.112}{192.168.8.112:9300}, reason: zen-disco-elected-as-master ([0] nodes joined)
+[2016-09-16T14:17:56,731][INFO ][o.e.h.HttpServer ] [6-bjhwl] publish_address {192.168.8.112:9200}, bound_addresses {[::1]:9200}, {192.168.8.112:9200}
+[2016-09-16T14:17:56,732][INFO ][o.e.g.GatewayService ] [6-bjhwl] recovered [0] indices into cluster_state
+[2016-09-16T14:17:56,748][INFO ][o.e.n.Node ] [6-bjhwl] started
+--------------------------------------------------
+
+Without going too much into detail, we can see that our node named "6-bjhwl" (which will be a different set of characters in your case) has started and elected itself as a master in a single cluster. Don't worry for now about what master means. The main thing that is important here is that we have started one node within one cluster.
As mentioned previously, we can override either the cluster or node name. This can be done from the command line when starting Elasticsearch as follows:
@@ -186,18 +200,20 @@ Let's start with a basic health check, which we can use to see how our cluster i
To check the cluster health, we will be using the <<cat,`_cat` API>>. Remember previously that our node HTTP endpoint is available at port `9200`:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl 'localhost:9200/_cat/health?v'
+GET /_cat/health?v
--------------------------------------------------
+// CONSOLE
And the response:
-[source,sh]
+[source,txt]
--------------------------------------------------
-epoch timestamp cluster status node.total node.data shards pri relo init unassign
-1394735289 14:28:09 elasticsearch green 1 1 0 0 0 0 0
+epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
+1475247709 17:01:49 elasticsearch green 1 1 0 0 0 0 0 0 - 100.0%
--------------------------------------------------
+// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTest/ _cat]
We can see that our cluster named "elasticsearch" is up with a green status.
@@ -207,38 +223,41 @@ Also from the above response, we can see and total of 1 node and that we have 0
We can also get a list of nodes in our cluster as follows:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl 'localhost:9200/_cat/nodes?v'
+GET /_cat/nodes?v
--------------------------------------------------
+// CONSOLE
And the response:
-[source,sh]
+[source,txt]
--------------------------------------------------
-curl 'localhost:9200/_cat/nodes?v'
-host ip heap.percent ram.percent load node.role master name
-mwubuntu1 127.0.1.1 8 4 0.00 d * New Goblin
+ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
+127.0.0.1 10 5 5 4.46 mdi * PB2SGZY
--------------------------------------------------
+// TESTRESPONSE[s/10 5 5 4.46/\\d+ \\d+ \\d+ (\\d+\\.\\d+)? (\\d+\\.\\d+)? (\\d+\.\\d+)?/]
+// TESTRESPONSE[s/[*]/[*]/ s/PB2SGZY/.+/ _cat]
-Here, we can see our one node named "New Goblin", which is the single node that is currently in our cluster.
+Here, we can see our one node named "PB2SGZY", which is the single node that is currently in our cluster.
=== List All Indices
Now let's take a peek at our indices:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl 'localhost:9200/_cat/indices?v'
+GET /_cat/indices?v
--------------------------------------------------
+// CONSOLE
And the response:
-[source,sh]
+[source,txt]
--------------------------------------------------
-curl 'localhost:9200/_cat/indices?v'
-health index pri rep docs.count docs.deleted store.size pri.store.size
+health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
--------------------------------------------------
+// TESTRESPONSE[_cat]
Which simply means we have no indices yet in the cluster.
@@ -246,28 +265,23 @@ Which simply means we have no indices yet in the cluster.
Now let's create an index named "customer" and then list all the indexes again:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer?pretty'
-curl 'localhost:9200/_cat/indices?v'
+PUT /customer?pretty
+GET /_cat/indices?v
--------------------------------------------------
+// CONSOLE
The first command creates the index named "customer" using the PUT verb. We simply append `pretty` to the end of the call to tell it to pretty-print the JSON response (if any).
And the response:
-[source,sh]
+[source,txt]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer?pretty'
-{
- "acknowledged" : true,
- "shards_acknowledged": true
-}
-
-curl 'localhost:9200/_cat/indices?v'
-health index pri rep docs.count docs.deleted store.size pri.store.size
-yellow customer 5 1 0 0 495b 495b
+health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
+yellow open customer 95SQ4TSUT7mWBT7VNHH67A 5 1 0 0 260b 260b
--------------------------------------------------
+// TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+b/ _cat]
The results of the second command tell us that we now have one index named customer, with 5 primary shards and 1 replica (the defaults), and it contains 0 documents.
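+
+As a side note, these defaults can be overridden at index creation time. The
+following is a minimal sketch using a hypothetical `customer2` index; the
+values shown are purely illustrative:
+
+[source,js]
+--------------------------------------------------
+PUT /customer2
+{
+  "settings" : {
+    "number_of_shards" : 3,
+    "number_of_replicas" : 2
+  }
+}
+--------------------------------------------------
+// CONSOLE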
@@ -279,32 +293,34 @@ Let's now put something into our customer index. Remember previously that in ord
Let's index a simple customer document into the customer index, "external" type, with an ID of 1 as follows:
-Our JSON document: { "name": "John Doe" }
-
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer/external/1?pretty' -d '
+PUT /customer/external/1?pretty
{
"name": "John Doe"
-}'
+}
--------------------------------------------------
+// CONSOLE
And the response:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer/external/1?pretty' -d '
-{
- "name": "John Doe"
-}'
{
"_index" : "customer",
"_type" : "external",
"_id" : "1",
"_version" : 1,
+ "result" : "created",
+ "_shards" : {
+ "total" : 2,
+ "successful" : 1,
+ "failed" : 0
+ },
"created" : true
}
--------------------------------------------------
+// TESTRESPONSE
From the above, we can see that a new customer document was successfully created inside the customer index and the external type. The document also has an internal id of 1 which we specified at index time.
@@ -312,16 +328,17 @@ It is important to note that Elasticsearch does not require you to explicitly cr
Let's now retrieve that document that we just indexed:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/customer/external/1?pretty'
+GET /customer/external/1?pretty
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
And the response:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/customer/external/1?pretty'
{
"_index" : "customer",
"_type" : "external",
@@ -331,6 +348,7 @@ curl -XGET 'localhost:9200/customer/external/1?pretty'
"_source" : { "name": "John Doe" }
}
--------------------------------------------------
+// TESTRESPONSE
Nothing out of the ordinary here other than a field, `found`, stating that we found a document with the requested ID of 1, and another field, `_source`, which returns the full JSON document that we indexed in the previous step.
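+
+If all we want back is the document body without the metadata, we can ask for
+just the `_source` (a small sketch continuing the example above):
+
+[source,js]
+--------------------------------------------------
+GET /customer/external/1/_source
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]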
@@ -338,45 +356,44 @@ Nothing out of the ordinary here other than a field, `found`, stating that we fo
Now let's delete the index that we just created and then list all the indexes again:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XDELETE 'localhost:9200/customer?pretty'
-curl 'localhost:9200/_cat/indices?v'
+DELETE /customer?pretty
+GET /_cat/indices?v
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
And the response:
-[source,sh]
+[source,txt]
--------------------------------------------------
-curl -XDELETE 'localhost:9200/customer?pretty'
-{
- "acknowledged" : true
-}
-curl 'localhost:9200/_cat/indices?v'
-health index pri rep docs.count docs.deleted store.size pri.store.size
+health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
--------------------------------------------------
+// TESTRESPONSE[_cat]
Which means that the index was deleted successfully and we are now back to where we started with nothing in our cluster.
Before we move on, let's take a closer look again at some of the API commands that we have learned so far:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer'
-curl -XPUT 'localhost:9200/customer/external/1' -d '
+PUT /customer
+PUT /customer/external/1
{
"name": "John Doe"
-}'
-curl 'localhost:9200/customer/external/1'
-curl -XDELETE 'localhost:9200/customer'
+}
+GET /customer/external/1
+DELETE /customer
--------------------------------------------------
If we study the above commands carefully, we can actually see a pattern of how we access data in Elasticsearch. That pattern can be summarized as follows:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -X<REST Verb> <Node>:<Port>/<Index>/<Type>/<ID>
+<REST Verb> /<Index>/<Type>/<ID>
--------------------------------------------------
+// NOTCONSOLE
This REST access pattern is so pervasive throughout all the API commands that if you can simply remember it, you will have a good head start at mastering Elasticsearch.
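+
+For example, reading the earlier index command against this pattern (an
+illustrative breakdown, not a runnable request):
+
+[source,js]
+--------------------------------------------------
+PUT /customer/external/1
+<REST Verb> = PUT, <Index> = customer, <Type> = external, <ID> = 1
+--------------------------------------------------
+// NOTCONSOLE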
@@ -389,33 +406,38 @@ Elasticsearch provides data manipulation and search capabilities in near real ti
We've previously seen how we can index a single document. Let's recall that command again:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer/external/1?pretty' -d '
+PUT /customer/external/1?pretty
{
"name": "John Doe"
-}'
+}
--------------------------------------------------
+// CONSOLE
Again, the above will index the specified document into the customer index, external type, with the ID of 1. If we then execute the above command again with a different (or the same) document, Elasticsearch will replace (i.e. reindex) the existing document that has the ID of 1 with the new one:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer/external/1?pretty' -d '
+PUT /customer/external/1?pretty
{
"name": "Jane Doe"
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
The above changes the name of the document with the ID of 1 from "John Doe" to "Jane Doe". If, on the other hand, we use a different ID, a new document will be indexed and the existing document(s) already in the index remain untouched.
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPUT 'localhost:9200/customer/external/2?pretty' -d '
+PUT /customer/external/2?pretty
{
"name": "Jane Doe"
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
The above indexes a new document with an ID of 2.
@@ -423,13 +445,15 @@ When indexing, the ID part is optional. If not specified, Elasticsearch will gen
This example shows how to index a document without an explicit ID:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/customer/external?pretty' -d '
+POST /customer/external?pretty
{
"name": "Jane Doe"
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
Note that in the above case, we are using the POST verb instead of PUT since we didn't specify an ID.
@@ -439,33 +463,39 @@ In addition to being able to index and replace documents, we can also update doc
This example shows how to update our previous document (ID of 1) by changing the name field to "Jane Doe":
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/customer/external/1/_update?pretty' -d '
+POST /customer/external/1/_update?pretty
{
"doc": { "name": "Jane Doe" }
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
This example shows how to update our previous document (ID of 1) by changing the name field to "Jane Doe" and at the same time adding an age field to it:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/customer/external/1/_update?pretty' -d '
+POST /customer/external/1/_update?pretty
{
"doc": { "name": "Jane Doe", "age": 20 }
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
-Updates can also be performed by using simple scripts. Note that dynamic scripts like the following are disabled by default as of `1.4.3`, have a look at the <<modules-scripting,scripting docs>> for more details. This example uses a script to increment the age by 5:
+Updates can also be performed by using simple scripts. This example uses a script to increment the age by 5:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/customer/external/1/_update?pretty' -d '
+POST /customer/external/1/_update?pretty
{
"script" : "ctx._source.age += 5"
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
In the above example, `ctx._source` refers to the current source document that is about to be updated.
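+
+Scripts can also remove fields. As a minimal sketch continuing the same
+document, the following removes the `age` field we added earlier:
+
+[source,js]
+--------------------------------------------------
+POST /customer/external/1/_update?pretty
+{
+  "script" : "ctx._source.remove(\"age\")"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]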
@@ -475,12 +505,16 @@ Note that as of this writing, updates can only be performed on a single document
Deleting a document is fairly straightforward. This example shows how to delete our previous customer with the ID of 2:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XDELETE 'localhost:9200/customer/external/2?pretty'
+DELETE /customer/external/2?pretty
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
-The `delete-by-query` plugin can delete all documents matching a specific query.
+See the <<docs-delete-by-query,Delete By Query API>> to delete all documents matching a specific query.
+It is worth noting that it is much more efficient to delete a whole index
+than to delete all of its documents with the Delete By Query API.
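+
+As a minimal sketch of that API (assuming the customer documents from above),
+the following deletes every document whose `name` matches "John Doe":
+
+[source,js]
+--------------------------------------------------
+POST /customer/_delete_by_query?pretty
+{
+  "query": { "match": { "name": "John Doe" } }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+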
=== Batch Processing
@@ -488,26 +522,27 @@ In addition to being able to index, update, and delete individual documents, Ela
As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/customer/external/_bulk?pretty' -d '
+POST /customer/external/_bulk?pretty
{"index":{"_id":"1"}}
{"name": "John Doe" }
{"index":{"_id":"2"}}
{"name": "Jane Doe" }
-'
--------------------------------------------------
+// CONSOLE
This example updates the first document (ID of 1) and then deletes the second document (ID of 2) in one bulk operation:
-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/customer/external/_bulk?pretty' -d '
+POST /customer/external/_bulk?pretty
{"update":{"_id":"1"}}
{"doc": { "name": "John Doe becomes Jane Doe" } }
{"delete":{"_id":"2"}}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
Note above that for the delete action, there is no corresponding source document after it since deletes only require the ID of the document to be deleted.
@@ -1053,7 +1088,7 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
"aggs": {
"group_by_gender": {
"terms": {
- "field": "gender"
+ "field": "gender.keyword"
},
"aggs": {
"average_balance": {
diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc
index bb5a367a04..50187af5b2 100644
--- a/docs/reference/how-to/indexing-speed.asciidoc
+++ b/docs/reference/how-to/indexing-speed.asciidoc
@@ -68,6 +68,15 @@ make sure to give at least half the memory of the machine running elasticsearch
to the filesystem cache.
[float]
+=== Use auto-generated ids
+
+When indexing a document that has an explicit id, Elasticsearch needs to check
+whether a document with the same id already exists within the same shard, which
+is a costly operation and gets even more costly as the index grows. By using
+auto-generated ids, Elasticsearch can skip this check, which makes indexing
+faster.
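+
+For example (the index and type names here are purely illustrative), contrast
+the two forms:
+
+[source,js]
+--------------------------------------------------
+PUT my_index/my_type/1 <1>
+{
+  "foo": "bar"
+}
+
+POST my_index/my_type <2>
+{
+  "foo": "bar"
+}
+--------------------------------------------------
+// CONSOLE
+<1> Explicit id: Elasticsearch must first check whether a document with id `1` already exists.
+<2> Auto-generated id: the uniqueness check can be skipped.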
+
+[float]
=== Use faster hardware
If indexing is I/O bound, you should investigate giving more memory to the
diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc
index 67848c9edc..2d0525a48e 100644
--- a/docs/reference/how-to/search-speed.asciidoc
+++ b/docs/reference/how-to/search-speed.asciidoc
@@ -141,6 +141,124 @@ In general, scripts should be avoided. If they are absolutely needed, you
should prefer the `painless` and `expressions` engines.
[float]
+=== Search rounded dates
+
+Queries on date fields that use `now` are typically not cacheable since the
+range that is being matched changes all the time. However, switching to a
+rounded date is often acceptable in terms of user experience, and has the
+benefit of making better use of the query cache.
+
+For instance, the query below:
+
+[source,js]
+--------------------------------------------------
+PUT index/type/1
+{
+ "my_date": "2016-05-11T16:30:55.328Z"
+}
+
+GET index/_search
+{
+ "query": {
+ "constant_score": {
+ "filter": {
+ "range": {
+ "my_date": {
+ "gte": "now-1h",
+ "lte": "now"
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE
+
+could be replaced with the following query:
+
+[source,js]
+--------------------------------------------------
+GET index/_search
+{
+ "query": {
+ "constant_score": {
+ "filter": {
+ "range": {
+ "my_date": {
+ "gte": "now-1h/m",
+ "lte": "now/m"
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+In that case we rounded to the minute, so if the current time is `16:31:29`,
+the range query will match every document whose `my_date` value falls
+between `15:31:00` and `16:31:59`. And if several users run a query that
+contains this range in the same minute, the query cache could help speed things
+up a bit. The longer the interval that is used for rounding, the more the query
+cache can help, but beware that overly aggressive rounding might also hurt user
+experience.
+
+
+NOTE: It might be tempting to split ranges into a large cacheable part and
+smaller non-cacheable parts in order to be able to leverage the query cache,
+as shown below:
+
+[source,js]
+--------------------------------------------------
+GET index/_search
+{
+ "query": {
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "should": [
+ {
+ "range": {
+ "my_date": {
+ "gte": "now-1h",
+ "lte": "now-1h/m"
+ }
+ }
+ },
+ {
+ "range": {
+ "my_date": {
+ "gt": "now-1h/m",
+ "lt": "now/m"
+ }
+ }
+ },
+ {
+ "range": {
+ "my_date": {
+ "gte": "now/m",
+ "lte": "now"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+However, such a practice might make the query run slower in some cases, since the
+overhead introduced by the `bool` query may defeat the savings from better
+leveraging the query cache.
+
+[float]
=== Force-merge read-only indices
Indices that are read-only would benefit from being
diff --git a/docs/reference/images/lambda_calc.png b/docs/reference/images/lambda_calc.png
index 2d7f8bbb8d..4fd19a2660 100644
--- a/docs/reference/images/lambda_calc.png
+++ b/docs/reference/images/lambda_calc.png
Binary files differ
diff --git a/docs/reference/images/sigma_calc.png b/docs/reference/images/sigma_calc.png
index d794c0a69d..9001bbe9ea 100644
--- a/docs/reference/images/sigma_calc.png
+++ b/docs/reference/images/sigma_calc.png
Binary files differ
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc
index ff1a3c62c7..28e9e6a114 100644
--- a/docs/reference/index-modules.asciidoc
+++ b/docs/reference/index-modules.asciidoc
@@ -38,7 +38,11 @@ specific index module:
The number of primary shards that an index should have. Defaults to 5.
This setting can only be set at index creation time. It cannot be
- changed on a closed index.
+ changed on a closed index. Note: the number of shards is limited to `1024` per
+ index. This is a safety limit to prevent the accidental creation of indices
+ that can destabilize a cluster due to resource allocation. The limit can be
+ modified by setting the `es.index.max_number_of_shards` system property on every
+ node that is part of the cluster, e.g. `export ES_JAVA_OPTS="-Des.index.max_number_of_shards=128"`.
`index.shard.check_on_startup`::
+
diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc
index 217a55fb0b..423968bb07 100644
--- a/docs/reference/index-modules/slowlog.asciidoc
+++ b/docs/reference/index-modules/slowlog.asciidoc
@@ -40,17 +40,25 @@ of the actual execution on the specific machine, compared with request
level.
The logging file is configured by default using the following
-configuration (found in `logging.yml`):
+configuration (found in `log4j2.properties`):
[source,yaml]
--------------------------------------------------
-index_search_slow_log_file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}_index_search_slowlog.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
+appender.index_search_slowlog_rolling.layout.type = PatternLayout
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.time.interval = 1
+appender.index_search_slowlog_rolling.policies.time.modulate = true
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
--------------------------------------------------
[float]
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index 036bd59ad6..c79ab86d11 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -1,15 +1,16 @@
[[elasticsearch-reference]]
= Elasticsearch Reference
-:version: 5.0.0-alpha5
-:major-version: 5.x
-:branch: master
-:jdk: 1.8.0_73
-:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/master
-:plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/master
-:javaclient: https://www.elastic.co/guide/en/elasticsearch/client/java-api/master/
-:issue: https://github.com/elastic/elasticsearch/issues/
-:pull: https://github.com/elastic/elasticsearch/pull/
+:version: 6.0.0-alpha1
+:major-version: 6.x
+:lucene_version: 6.2.0
+:branch: master
+:jdk: 1.8.0_73
+:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/master
+:plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/master
+:javaclient: https://www.elastic.co/guide/en/elasticsearch/client/java-api/master/
+:issue: https://github.com/elastic/elasticsearch/issues/
+:pull: https://github.com/elastic/elasticsearch/pull/
include::getting-started.asciidoc[]
@@ -49,10 +50,8 @@ include::testing.asciidoc[]
include::glossary.asciidoc[]
-include::release-notes.asciidoc[]
+//////
+ include::release-notes.asciidoc[]
+//////
include::redirects.asciidoc[]
-
-
-
-
diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc
index 294bbd32a5..16d318ee97 100644
--- a/docs/reference/indices/aliases.asciidoc
+++ b/docs/reference/indices/aliases.asciidoc
@@ -24,7 +24,7 @@ POST /_aliases
// CONSOLE
// TEST[s/^/PUT test1\nPUT test2\n/]
-An alias can also be removed, for example:
+And here is removing that same alias:
[source,js]
--------------------------------------------------
@@ -109,6 +109,25 @@ indices that match this pattern are added/removed.
It is an error to index to an alias which points to more than one index.
+It is also possible to swap an index with an alias in one operation:
+
+[source,js]
+--------------------------------------------------
+PUT test <1>
+PUT test_2 <2>
+POST /_aliases
+{
+ "actions" : [
+ { "add": { "index": "test_2", "alias": "test" } },
+ { "remove_index": { "index": "test" } } <3>
+ ]
+}
+--------------------------------------------------
+// CONSOLE
+<1> An index we've added by mistake
+<2> The index we should have added
+<3> `remove_index` is just like <<indices-delete-index>>
+
[float]
[[filtered]]
=== Filtered Aliases
diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc
index e5ed67bf12..dbb2c8f101 100644
--- a/docs/reference/indices/analyze.asciidoc
+++ b/docs/reference/indices/analyze.asciidoc
@@ -43,13 +43,13 @@ curl -XGET 'localhost:9200/_analyze' -d '
curl -XGET 'localhost:9200/_analyze' -d '
{
"tokenizer" : "keyword",
- "token_filter" : ["lowercase"],
+ "filter" : ["lowercase"],
"char_filter" : ["html_strip"],
"text" : "this is a <b>test</b>"
}'
--------------------------------------------------
-deprecated[5.0.0, Use `filter`/`token_filter`/`char_filter` instead of `filters`/`token_filters`/`char_filters`]
+deprecated[5.0.0, Use `filter`/`char_filter` instead of `filters`/`char_filters` and `token_filters` has been removed]
Custom tokenizers, token filters, and character filters can be specified in the request body as follows:
@@ -112,7 +112,7 @@ provided it doesn't start with `{` :
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&token_filter=lowercase&char_filter=html_strip' -d 'this is a <b>test</b>'
+curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&char_filter=html_strip' -d 'this is a <b>test</b>'
--------------------------------------------------
=== Explain Analyze
@@ -136,8 +136,6 @@ GET _analyze
// CONSOLE
<1> Set "keyword" to output "keyword" attribute only
-coming[2.0.0, body based parameters were added in 2.0.0]
-
The request returns the following result:
[source,js]
@@ -183,4 +181,5 @@ The request returns the following result:
}
}
--------------------------------------------------
+// TESTRESPONSE
<1> Output only "keyword" attribute, since specify "attributes" in the request.
diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc
index 8ebb9e3488..6a7240dc95 100644
--- a/docs/reference/indices/clearcache.asciidoc
+++ b/docs/reference/indices/clearcache.asciidoc
@@ -6,8 +6,10 @@ associated with one or more indices.
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear'
+POST /twitter/_cache/clear
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The API, by default, will clear all caches. Specific caches can be cleaned
explicitly by setting `query`, `fielddata` or `request`.
@@ -24,8 +26,9 @@ call, or even on `_all` the indices.
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear'
+POST /kimchy,elasticsearch/_cache/clear
-$ curl -XPOST 'http://localhost:9200/_cache/clear'
+POST /_cache/clear
--------------------------------------------------
-
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc
index 5c652accfb..bc057e155d 100644
--- a/docs/reference/indices/delete-index.asciidoc
+++ b/docs/reference/indices/delete-index.asciidoc
@@ -5,8 +5,10 @@ The delete index API allows to delete an existing index.
[source,js]
--------------------------------------------------
-$ curl -XDELETE 'http://localhost:9200/twitter/'
+DELETE /twitter
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The above example deletes an index called `twitter`. Specifying an index,
alias or wildcard expression is required.
diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc
index a33b7fdfe2..26baf21417 100644
--- a/docs/reference/indices/forcemerge.asciidoc
+++ b/docs/reference/indices/forcemerge.asciidoc
@@ -12,8 +12,10 @@ block until the previous force merge is complete.
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_forcemerge'
+POST /twitter/_forcemerge
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
[float]
[[forcemerge-parameters]]
@@ -45,7 +47,9 @@ even on `_all` the indices.
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_forcemerge'
+POST /kimchy,elasticsearch/_forcemerge
-$ curl -XPOST 'http://localhost:9200/_forcemerge'
+POST /_forcemerge
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc
index 39667dc087..224e74605f 100644
--- a/docs/reference/indices/get-field-mapping.asciidoc
+++ b/docs/reference/indices/get-field-mapping.asciidoc
@@ -9,8 +9,10 @@ The following returns the mapping of the field `text` only:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/_mapping/tweet/field/text'
+GET /twitter/_mapping/tweet/field/message
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
For which the response is (assuming `message` is a default string field):
@@ -18,18 +20,28 @@ For which the response is (assuming `text` is a default string field):
--------------------------------------------------
{
"twitter": {
- "tweet": {
- "text": {
- "full_name": "text",
- "mapping": {
- "text": { "type": "text" }
+ "mappings": {
+ "tweet": {
+ "message": {
+ "full_name": "message",
+ "mapping": {
+ "message": {
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ }
+ }
}
}
}
}
}
--------------------------------------------------
-
+// TESTRESPONSE
[float]
@@ -44,12 +56,15 @@ following are some examples:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter,kimchy/_mapping/field/message'
+GET /twitter,kimchy/_mapping/field/message
-curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book/field/message,user.id'
+GET /_all/_mapping/tweet,book/field/message,user.id
-curl -XGET 'http://localhost:9200/_all/_mapping/tw*/field/*.id'
+GET /_all/_mapping/tw*/field/*.id
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+// TEST[s/^/PUT kimchy\nPUT book\n/]
[float]
=== Specifying fields
diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc
index b82bee0563..772318c71d 100644
--- a/docs/reference/indices/get-index.asciidoc
+++ b/docs/reference/indices/get-index.asciidoc
@@ -1,12 +1,14 @@
[[indices-get-index]]
== Get Index
-The get index API allows to retrieve information about one or more indexes.
+The get index API allows you to retrieve information about one or more indices.
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/'
+GET /twitter
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The above example gets the information for an index called `twitter`. Specifying an index,
alias or wildcard expression is required.
@@ -17,13 +19,15 @@ all indices by using `_all` or `*` as index.
[float]
=== Filtering index information
-The information returned by the get API can be filtered to include only specific features
+The information returned by the get API can be filtered to include only specific features
by specifying a comma delimited list of features in the URL:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/_settings,_mappings'
+GET twitter/_settings,_mappings
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The above command will only return the settings and mappings for the index called `twitter`.
diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc
index 317a708f13..c3580917d9 100644
--- a/docs/reference/indices/get-mapping.asciidoc
+++ b/docs/reference/indices/get-mapping.asciidoc
@@ -6,8 +6,10 @@ index/type.
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/_mapping/tweet'
+GET /twitter/_mapping/tweet
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
[float]
=== Multiple Indices and Types
@@ -21,17 +23,21 @@ following are some examples:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/_mapping/twitter,kimchy'
+GET /_mapping/tweet,kimchy
-curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book'
+GET /_all/_mapping/tweet,book
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
If you want to get mappings of all indices and types then the following
two examples are equivalent:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/_all/_mapping'
+GET /_all/_mapping
-curl -XGET 'http://localhost:9200/_mapping'
+GET /_mapping
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc
index 4689c448b5..60d7a75a86 100644
--- a/docs/reference/indices/get-settings.asciidoc
+++ b/docs/reference/indices/get-settings.asciidoc
@@ -5,8 +5,10 @@ The get settings API allows to retrieve settings of index/indices:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/_settings'
+GET /twitter/_settings
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
[float]
=== Multiple Indices and Types
@@ -20,12 +22,15 @@ Wildcard expressions are also supported. The following are some examples:
[source,js]
--------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter,kimchy/_settings'
+GET /twitter,kimchy/_settings
-curl -XGET 'http://localhost:9200/_all/_settings'
+GET /_all/_settings
-curl -XGET 'http://localhost:9200/2013-*/_settings'
+GET /log_2013_*/_settings
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+// TEST[s/^/PUT kimchy\nPUT log_2013_01_01\n/]
[float]
=== Filtering settings by name
diff --git a/docs/reference/indices/indices-exists.asciidoc b/docs/reference/indices/indices-exists.asciidoc
index 1ffb1074a5..184c649c27 100644
--- a/docs/reference/indices/indices-exists.asciidoc
+++ b/docs/reference/indices/indices-exists.asciidoc
@@ -5,8 +5,10 @@ Used to check if the index (indices) exists or not. For example:
[source,js]
--------------------------------------------------
-curl -XHEAD -i 'http://localhost:9200/twitter'
+HEAD twitter
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The HTTP status code indicates if the index exists or not. A `404` means
it does not exist, and `200` means it does.
diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc
index afdab7beda..59f36112b4 100644
--- a/docs/reference/indices/open-close.asciidoc
+++ b/docs/reference/indices/open-close.asciidoc
@@ -12,10 +12,12 @@ example:
[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/my_index/_close'
+POST /my_index/_close
-curl -XPOST 'localhost:9200/my_index/_open'
+POST /my_index/_open
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_index\n/]
It is possible to open and close multiple indices. An error will be thrown
if the request explicitly refers to a missing index. This behaviour can be
diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc
index c4aabac3ac..448c423d0b 100644
--- a/docs/reference/indices/recovery.asciidoc
+++ b/docs/reference/indices/recovery.asciidoc
@@ -8,15 +8,19 @@ For example, the following command would show recovery information for the indic
[source,js]
--------------------------------------------------
-curl -XGET http://localhost:9200/index1,index2/_recovery
+GET index1,index2/_recovery?human
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT index1\nPUT index2\n/]
To see cluster-wide recovery status simply leave out the index names.
[source,js]
--------------------------------------------------
-curl -XGET http://localhost:9200/_recovery?pretty&human
+GET /_recovery?human
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT index1\n{"settings": {"index.number_of_shards": 1}}\n/]
Response:
[source,js]
@@ -30,7 +34,7 @@ Response:
"primary" : true,
"start_time" : "2014-02-24T12:15:59.716",
"start_time_in_millis": 1393244159716,
- "total_time" : "2.9m"
+ "total_time" : "2.9m",
"total_time_in_millis" : 175576,
"source" : {
"repository" : "my_repository",
@@ -45,7 +49,7 @@ Response:
},
"index" : {
"size" : {
- "total" : "75.4mb"
+ "total" : "75.4mb",
"total_in_bytes" : 79063092,
"reused" : "0b",
"reused_in_bytes" : 0,
@@ -68,7 +72,7 @@ Response:
"percent" : "100.0%",
"total_on_start" : 0,
"total_time" : "0s",
- "total_time_in_millis" : 0
+ "total_time_in_millis" : 0,
},
"start" : {
"check_index_time" : "0s",
@@ -80,6 +84,7 @@ Response:
}
}
--------------------------------------------------
+// We should really assert that this is up to date but that is hard!
The above response shows a single index recovering a single shard. In this case, the source of the recovery is a snapshot repository
and the target of the recovery is the node with name "my_es_node".
@@ -90,7 +95,7 @@ In some cases a higher level of detail may be preferable. Setting "detailed=true
[source,js]
--------------------------------------------------
-curl -XGET http://localhost:9200/_recovery?pretty&human&detailed=true
+GET _recovery?human&detailed=true
--------------------------------------------------
Response:
@@ -170,6 +175,7 @@ Response:
}
}
--------------------------------------------------
+// We should really assert that this is up to date but that is hard!
This response shows a detailed listing (truncated for brevity) of the actual files recovered and their sizes.
diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc
index bbc1f20f40..1e27ace362 100644
--- a/docs/reference/indices/refresh.asciidoc
+++ b/docs/reference/indices/refresh.asciidoc
@@ -9,8 +9,10 @@ refresh is scheduled periodically.
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_refresh'
+POST /twitter/_refresh
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
[float]
=== Multi Index
@@ -20,7 +22,9 @@ call, or even on `_all` the indices.
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_refresh'
+POST /kimchy,elasticsearch/_refresh
-$ curl -XPOST 'http://localhost:9200/_refresh'
+POST /_refresh
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc
index b12d93bb4b..6ee28e7b2f 100644
--- a/docs/reference/indices/rollover-index.asciidoc
+++ b/docs/reference/indices/rollover-index.asciidoc
@@ -19,7 +19,9 @@ PUT /logs-000001 <1>
}
}
-POST logs_write/_rollover <2>
+# Add > 1000 documents to logs-000001
+
+POST /logs_write/_rollover <2>
{
"conditions": {
"max_age": "7d",
@@ -28,6 +30,8 @@ POST logs_write/_rollover <2>
}
--------------------------------------------------
// CONSOLE
+// TEST[setup:huge_twitter]
+// TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/]
<1> Creates an index called `logs-000001` with the alias `logs_write`.
<2> If the index pointed to by `logs_write` was created 7 or more days ago, or
    contains 1,000 or more documents, then the `logs-000002` index is created
@@ -38,6 +42,8 @@ The above request might return the following response:
[source,js]
--------------------------------------------------
{
+ "acknowledged": true,
+ "shards_acknowledged": true,
"old_index": "logs-000001",
"new_index": "logs-000002",
"rolled_over": true, <1>
@@ -48,9 +54,10 @@ The above request might return the following response:
}
}
--------------------------------------------------
- <1> Whether the index was rolled over.
- <2> Whether the rollover was dry run.
- <3> The result of each condition.
+// TESTRESPONSE
+<1> Whether the index was rolled over.
+<2> Whether the rollover was dry run.
+<3> The result of each condition.
[float]
=== Naming the new index
@@ -65,9 +72,16 @@ the new index as follows:
[source,js]
--------------------------------------------------
-POST my_alias/_rollover/my_new_index_name
-{...}
+POST /my_alias/_rollover/my_new_index_name
+{
+ "conditions": {
+ "max_age": "7d",
+ "max_docs": 1000
+ }
+}
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/]
[float]
=== Defining the new index
@@ -75,7 +89,7 @@ POST my_alias/_rollover/my_new_index_name
The settings, mappings, and aliases for the new index are taken from any
matching <<indices-templates,index templates>>. Additionally, you can specify
`settings`, `mappings`, and `aliases` in the body of the request, just like the
-<<indices-create-index,create index>> API. Values specified in the request
+<<indices-create-index,create index>> API. Values specified in the request
override any values set in matching index templates. For example, the following
`rollover` request overrides the `index.number_of_shards` setting:
@@ -88,14 +102,14 @@ PUT /logs-000001
}
}
-POST logs_write/_rollover
+POST /logs_write/_rollover
{
"conditions" : {
"max_age": "7d",
"max_docs": 1000
},
- "settings": {
- "index.number_of_shards": 2
+ "settings": {
+ "index.number_of_shards": 2
}
}
--------------------------------------------------
@@ -116,7 +130,7 @@ PUT /logs-000001
}
}
-POST logs_write/_rollover?dry_run
+POST /logs_write/_rollover?dry_run
{
"conditions" : {
"max_age": "7d",
@@ -129,6 +143,6 @@ POST logs_write/_rollover?dry_run
[float]
=== Wait For Active Shards
-Because the rollover operation creates a new index to rollover to, the
-<<create-index-wait-for-active-shards,wait for active shards>> setting on
+Because the rollover operation creates a new index to rollover to, the
+<<create-index-wait-for-active-shards,wait for active shards>> setting on
index creation applies to the rollover action as well.
diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc
index 2ee1d80ea9..027cf8b924 100644
--- a/docs/reference/indices/shrink-index.asciidoc
+++ b/docs/reference/indices/shrink-index.asciidoc
@@ -41,6 +41,8 @@ PUT /my_source_index/_settings
}
}
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_source_index\n/]
<1> Forces the relocation of a copy of each shard to the node with name
`shrink_node_name`. See <<shard-allocation-filtering>> for more options.
@@ -50,7 +52,7 @@ PUT /my_source_index/_settings
It can take a while to relocate the source index. Progress can be tracked
with the <<cat-recovery,`_cat recovery` API>>, or the <<cluster-health,
`cluster health` API>> can be used to wait until all shards have relocated
-with the `wait_for_relocating_shards` parameter.
+with the `wait_for_no_relocating_shards` parameter.
[float]
=== Shrinking an index
@@ -62,6 +64,8 @@ the following request:
--------------------------------------------------
POST my_source_index/_shrink/my_target_index
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
The above request returns immediately once the target index has been added to
the cluster state -- it doesn't wait for the shrink operation to start.
@@ -105,6 +109,8 @@ POST my_source_index/_shrink/my_target_index
}
}
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true}}\n/]
<1> The number of shards in the target index. This must be a factor of the
number of shards in the source index.
@@ -139,6 +145,6 @@ replicas and may decide to relocate the primary shard to another node.
[float]
=== Wait For Active Shards
-Because the shrink operation creates a new index to shrink the shards to,
-the <<create-index-wait-for-active-shards,wait for active shards>> setting
+Because the shrink operation creates a new index to shrink the shards to,
+the <<create-index-wait-for-active-shards,wait for active shards>> setting
on index creation applies to the shrink index action as well.
diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc
index e990a7ff6b..a95b1c81ae 100644
--- a/docs/reference/indices/stats.asciidoc
+++ b/docs/reference/indices/stats.asciidoc
@@ -10,15 +10,18 @@ all indices:
[source,js]
--------------------------------------------------
-curl localhost:9200/_stats
+GET /_stats
--------------------------------------------------
+// CONSOLE
Specific index stats can be retrieved using:
[source,js]
--------------------------------------------------
-curl localhost:9200/index1,index2/_stats
+GET /index1,index2/_stats
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT index1\nPUT index2\n/]
By default, all stats are returned. Specific stats to return can also be
specified in the URI. Those stats can be any of:
@@ -74,12 +77,14 @@ Here are some samples:
[source,js]
--------------------------------------------------
# Get back stats for merge and refresh only for all indices
-curl 'localhost:9200/_stats/merge,refresh'
+GET /_stats/merge,refresh
# Get back stats for type1 and type2 documents for the my_index index
-curl 'localhost:9200/my_index/_stats/indexing?types=type1,type2
+GET /my_index/_stats/indexing?types=type1,type2
# Get back just search stats for group1 and group2
-curl 'localhost:9200/_stats/search?groups=group1,group2
+GET /_stats/search?groups=group1,group2
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_index\n/]
The stats returned are aggregated on the index level, with
`primaries` and `total` aggregations, where `primaries` are the values for only the
@@ -91,4 +96,3 @@ Note, as shards move around the cluster, their stats will be cleared as
they are created on other nodes. On the other hand, even though a shard
"left" a node, that node will still retain the stats that shard
contributed to.
-
diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc
index 754a93e309..6e2f7ce91f 100644
--- a/docs/reference/indices/templates.asciidoc
+++ b/docs/reference/indices/templates.asciidoc
@@ -38,6 +38,7 @@ PUT _template/template_1
}
--------------------------------------------------
// CONSOLE
+// TESTSETUP
Defines a template named template_1, with a template pattern of `te*`.
The settings and mappings will be applied to any index name that matches
@@ -47,7 +48,7 @@ It is also possible to include aliases in an index template as follows:
[source,js]
--------------------------------------------------
-curl -XPUT localhost:9200/_template/template_1 -d '
+PUT _template/template_1
{
"template" : "te*",
"settings" : {
@@ -64,8 +65,9 @@ curl -XPUT localhost:9200/_template/template_1 -d '
"{index}-alias" : {} <1>
}
}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/DELETE _template\/template_1\n/]
<1> the `{index}` placeholder within the alias name will be replaced with the
actual index name that the template gets applied to during index creation.
@@ -79,8 +81,9 @@ Index templates are identified by a name (in the above case
[source,js]
--------------------------------------------------
-curl -XDELETE localhost:9200/_template/template_1
+DELETE /_template/template_1
--------------------------------------------------
+// CONSOLE
[float]
[[getting]]
@@ -91,24 +94,26 @@ Index templates are identified by a name (in the above case
[source,js]
--------------------------------------------------
-curl -XGET localhost:9200/_template/template_1
+GET /_template/template_1
--------------------------------------------------
+// CONSOLE
You can also match several templates by using wildcards like:
[source,js]
--------------------------------------------------
-curl -XGET localhost:9200/_template/temp*
-curl -XGET localhost:9200/_template/template_1,template_2
+GET /_template/temp*
+GET /_template/template_1,template_2
--------------------------------------------------
+// CONSOLE
To get list of all index templates you can run:
[source,js]
--------------------------------------------------
-curl -XGET localhost:9200/_template/
+GET /_template
--------------------------------------------------
-
+// CONSOLE
[float]
[[indices-templates-exists]]
@@ -118,13 +123,13 @@ Used to check if the template exists or not. For example:
[source,js]
-----------------------------------------------
-curl -XHEAD -i localhost:9200/_template/template_1
+HEAD _template/template_1
-----------------------------------------------
+// CONSOLE
The HTTP status code indicates if the template with the given name
exists or not. A status code `200` means it exists, a `404` it does not.
-
[float]
[[multiple-templates]]
=== Multiple Template Matching
@@ -137,7 +142,7 @@ orders overriding them. For example:
[source,js]
--------------------------------------------------
-curl -XPUT localhost:9200/_template/template_1 -d '
+PUT /_template/template_1
{
"template" : "*",
"order" : 0,
@@ -150,9 +155,8 @@ curl -XPUT localhost:9200/_template/template_1 -d '
}
}
}
-'
-curl -XPUT localhost:9200/_template/template_2 -d '
+PUT /_template/template_2
{
"template" : "te*",
"order" : 1,
@@ -165,8 +169,9 @@ curl -XPUT localhost:9200/_template/template_2 -d '
}
}
}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/DELETE _template\/template_1\n/]
The above will disable storing the `_source` on all `type1` types, but
for indices that start with `te*`, source will still be enabled.
diff --git a/docs/reference/indices/types-exists.asciidoc b/docs/reference/indices/types-exists.asciidoc
index 739e0e9bf3..29a9be29e0 100644
--- a/docs/reference/indices/types-exists.asciidoc
+++ b/docs/reference/indices/types-exists.asciidoc
@@ -5,8 +5,10 @@ Used to check if a type/types exists in an index/indices.
[source,js]
--------------------------------------------------
-curl -XHEAD -i 'http://localhost:9200/twitter/tweet'
+HEAD twitter/_mapping/tweet
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The HTTP status code indicates if the type exists or not. A `404` means
it does not exist, and `200` means it does.
diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index 0b08ee1ae3..48a0bc67e6 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -706,10 +706,11 @@ such a case, `target_field` will still be updated with the unconverted field val
.Convert Options
[options="header"]
|======
-| Name | Required | Default | Description
-| `field` | yes | - | The field whose value is to be converted
-| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
-| `type` | yes | - | The type to convert the existing value to
+| Name | Required | Default | Description
+| `field` | yes | - | The field whose value is to be converted
+| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
+| `type` | yes | - | The type to convert the existing value to
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|======
[source,js]
@@ -931,6 +932,12 @@ to the requester.
[[foreach-processor]]
=== Foreach Processor
+
+experimental[This processor may change or be replaced by something else that provides similar functionality. This
+processor executes in its own context, which makes it different from all other processors, and for features like
+verbose simulation the subprocessor isn't visible. The reason we still expose this processor is that it is the only
+processor that can operate on an array]
+
Processes elements in an array of unknown length.
All processors can operate on elements inside an array, but if all elements of an array need to
@@ -1142,6 +1149,7 @@ Grok expression.
| `patterns` | yes | - | An ordered list of grok expression to match and extract named captures with. Returns on the first expression in the list that matches.
| `pattern_definitions` | no | - | A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. Patterns matching existing names will override the pre-existing definition.
| `trace_match` | no | false | when true, `_ingest._grok_match_index` will be inserted into your matched document's metadata with the index into the pattern found in `patterns` that matched.
+| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|======
Here is an example of using the provided patterns to extract out and name structured fields from a string field in
@@ -1270,6 +1278,28 @@ Throws an error when the field is not an array.
}
--------------------------------------------------
+[[json-processor]]
+=== JSON Processor
+Converts a JSON string into a structured JSON object.
+
+[[json-options]]
+.Json Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `field` | yes | - | The field to be parsed
+| `target_field` | no | `field` | The field to insert the converted structured object into
+|======
+
+[source,js]
+--------------------------------------------------
+{
+ "json": {
+    "field": "string_source",
+    "target_field": "json_target"
+ }
+}
+--------------------------------------------------
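+
+For example (using the illustrative field names above), a document like:
+
+[source,js]
+--------------------------------------------------
+{
+  "string_source": "{\"foo\": 2000}"
+}
+--------------------------------------------------
+
+is transformed by the `json` processor into:
+
+[source,js]
+--------------------------------------------------
+{
+  "string_source": "{\"foo\": 2000}",
+  "json_target": {
+    "foo": 2000
+  }
+}
+--------------------------------------------------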
+
[[lowercase-processor]]
=== Lowercase Processor
Converts a string to its lowercase equivalent.
@@ -1278,8 +1308,9 @@ Converts a string to its lowercase equivalent.
.Lowercase Options
[options="header"]
|======
-| Name | Required | Default | Description
-| `field` | yes | - | The field to make lowercase
+| Name | Required | Default | Description
+| `field` | yes | - | The field to make lowercase
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|======
[source,js]
@@ -1320,9 +1351,10 @@ Renames an existing field. If the field doesn't exist or the new name is already
.Rename Options
[options="header"]
|======
-| Name | Required | Default | Description
-| `field` | yes | - | The field to be renamed
-| `target_field` | yes | - | The new name of the field
+| Name | Required | Default | Description
+| `field` | yes | - | The field to be renamed
+| `target_field` | yes | - | The new name of the field
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
|======
[source,js]
@@ -1351,25 +1383,27 @@ caching see <<modules-scripting-using-caching, Script Caching>>.
[options="header"]
|======
| Name | Required | Default | Description
-| `field` | yes | - | The field to set
| `lang` | no | - | The scripting language
| `file` | no | - | The script file to refer to
| `id` | no | - | The stored script id to refer to
| `inline` | no | - | An inline script to be executed
+| `params` | no | - | Script Parameters
|======
You can access the current ingest document from within the script context by using the `ctx` variable.
-The following example sets a new field called `field_a_plus_b` to be the sum of two existing
-numeric fields `field_a` and `field_b`:
+The following example sets a new field called `field_a_plus_b_times_c` to be the sum of two existing
+numeric fields `field_a` and `field_b`, multiplied by the parameter `param_c`:
[source,js]
--------------------------------------------------
{
"script": {
- "field": "field_a_plus_b",
"lang": "painless",
- "inline": "return ctx.field_a + ctx.field_b"
+ "inline": "ctx.field_a_plus_b_times_c = (ctx.field_a + ctx.field_b) * params.param_c",
+ "params": {
+ "param_c": 10
+ }
}
}
--------------------------------------------------
@@ -1459,8 +1493,9 @@ NOTE: This only works on leading and trailing whitespace.
.Trim Options
[options="header"]
|======
-| Name | Required | Default | Description
-| `field` | yes | - | The string-valued field to trim whitespace from
+| Name | Required | Default | Description
+| `field` | yes | - | The string-valued field to trim whitespace from
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
|======
[source,js]
@@ -1480,8 +1515,9 @@ Converts a string to its uppercase equivalent.
.Uppercase Options
[options="header"]
|======
-| Name | Required | Default | Description
-| `field` | yes | - | The field to make uppercase
+| Name | Required | Default | Description
+| `field` | yes | - | The field to make uppercase
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|======
[source,js]
@@ -1492,3 +1528,115 @@ Converts a string to its uppercase equivalent.
}
}
--------------------------------------------------
+
+[[dot-expand-processor]]
+=== Dot Expander Processor
+
+Expands a field with dots into an object field. This processor allows fields
+with dots in the name to be accessible by other processors in the pipeline.
+Otherwise these <<accessing-data-in-pipelines,fields>> can't be accessed by any processor.
+
+[[dot-expender-options]]
+.Dot Expand Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `field` | yes | - | The field to expand into an object field
+| `path`  | no       | -       | The field that contains the field to expand. Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields.
+|======
+
+[source,js]
+--------------------------------------------------
+{
+ "dot_expander": {
+ "field": "foo.bar"
+ }
+}
+--------------------------------------------------
+
+For example, the dot expander processor would turn this document:
+
+[source,js]
+--------------------------------------------------
+{
+ "foo.bar" : "value"
+}
+--------------------------------------------------
+
+into:
+
+[source,js]
+--------------------------------------------------
+{
+ "foo" : {
+ "bar" : "value"
+ }
+}
+--------------------------------------------------
+
+If there is already a `bar` field nested under `foo` then
+this processor merges the `foo.bar` field into it. If the field is
+a scalar value then this processor will turn it into an array field.
+
+For example, the following document:
+
+[source,js]
+--------------------------------------------------
+{
+ "foo.bar" : "value2",
+ "foo" : {
+ "bar" : "value1"
+ }
+}
+--------------------------------------------------
+
+is transformed by the `dot_expander` processor into:
+
+[source,js]
+--------------------------------------------------
+{
+ "foo" : {
+ "bar" : ["value1", "value2"]
+ }
+}
+--------------------------------------------------
+
+If any field outside of the leaf field conflicts with a pre-existing field of the same name,
+then that field needs to be renamed first.
+
+Consider the following document:
+
+[source,js]
+--------------------------------------------------
+{
+ "foo": "value1",
+ "foo.bar": "value2"
+}
+--------------------------------------------------
+
+Then the `foo` field needs to be renamed before the `dot_expander`
+processor is applied. So in order for the `foo.bar` field to properly
+be expanded into the `bar` field under the `foo` field, the following
+pipeline should be used:
+
+[source,js]
+--------------------------------------------------
+{
+ "processors" : [
+ {
+ "rename" : {
+ "field" : "foo",
+ "target_field" : "foo.bar""
+ }
+ },
+ {
+ "dot_expander": {
+ "field": "foo.bar"
+ }
+ }
+ ]
+}
+--------------------------------------------------
+
+The reason for this is that Ingest doesn't know how to automatically cast
+a scalar field to an object field. \ No newline at end of file
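+
+The combined pipeline can be tried out with the simulate pipeline API before
+being put to use (a sketch, reusing the conflicting sample document from
+above):
+
+[source,js]
+--------------------------------------------------
+POST _ingest/pipeline/_simulate
+{
+  "pipeline": {
+    "processors": [
+      {
+        "rename": {
+          "field": "foo",
+          "target_field": "foo.bar"
+        }
+      },
+      {
+        "dot_expander": {
+          "field": "foo.bar"
+        }
+      }
+    ]
+  },
+  "docs": [
+    {
+      "_source": {
+        "foo": "value1",
+        "foo.bar": "value2"
+      }
+    }
+  ]
+}
+--------------------------------------------------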
diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc
index 5507ba97ff..dffb4f94ff 100644
--- a/docs/reference/mapping.asciidoc
+++ b/docs/reference/mapping.asciidoc
@@ -64,6 +64,29 @@ the <<analysis-standard-analyzer,`standard` analyzer>>, the
This is the purpose of _multi-fields_. Most datatypes support multi-fields
via the <<multi-fields>> parameter.
+[[mapping-limit-settings]]
+[float]
+=== Settings to prevent mappings explosion
+
+The following settings allow you to limit the number of field mappings that
+can be created manually or dynamically, in order to prevent bad documents from
+causing a mapping explosion:
+
+`index.mapping.total_fields.limit`::
+ The maximum number of fields in an index. The default value is `1000`.
+
+`index.mapping.depth.limit`::
+ The maximum depth for a field, which is measured as the number of inner
+ objects. For instance, if all fields are defined at the root object level,
+ then the depth is `1`. If there is one object mapping, then the depth is
+ `2`, etc. The default is `20`.
+
+`index.mapping.nested_fields.limit`::
+ The maximum number of `nested` fields in an index, defaults to `50`.
+ Indexing 1 document with 100 nested fields actually indexes 101 documents
+ as each nested document is indexed as a separate hidden document.
+
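+For example, the limits can be adjusted when an index is created (a sketch;
+`my_index` and the values shown are placeholders):
+
+[source,js]
+--------------------------------------------------
+PUT my_index
+{
+  "settings": {
+    "index.mapping.total_fields.limit": 2000,
+    "index.mapping.depth.limit": 20,
+    "index.mapping.nested_fields.limit": 50
+  }
+}
+--------------------------------------------------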
+
[float]
== Dynamic mapping
diff --git a/docs/reference/mapping/dynamic-mapping.asciidoc b/docs/reference/mapping/dynamic-mapping.asciidoc
index 2370684ff4..adc7d8675c 100644
--- a/docs/reference/mapping/dynamic-mapping.asciidoc
+++ b/docs/reference/mapping/dynamic-mapping.asciidoc
@@ -40,15 +40,32 @@ automatically or explicitly.
[float]
=== Disabling automatic type creation
-Automatic type creation can be disabled by setting the `index.mapper.dynamic`
-setting to `false`, either by setting the default value in the
-`config/elasticsearch.yml` file, or per-index as an index setting:
+Automatic type creation can be disabled per-index by setting the `index.mapper.dynamic`
+setting to `false` in the index settings:
[source,js]
--------------------------------------------------
-PUT data/_settings <1>
+PUT data/_settings
{
- "index.mapper.dynamic":false
+ "index.mapper.dynamic":false <1>
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+<1> Disable automatic type creation for the index named "data".
+
+Automatic type creation can also be disabled for all indices by setting an index template:
+
+[source,js]
+--------------------------------------------------
+PUT _template/template_all
+{
+ "template": "*",
+ "order":0,
+ "settings": {
+ "index.mapper.dynamic": false <1>
+ }
}
--------------------------------------------------
// CONSOLE
diff --git a/docs/reference/mapping/dynamic/field-mapping.asciidoc b/docs/reference/mapping/dynamic/field-mapping.asciidoc
index 020c5d2455..7bed12b5b7 100644
--- a/docs/reference/mapping/dynamic/field-mapping.asciidoc
+++ b/docs/reference/mapping/dynamic/field-mapping.asciidoc
@@ -30,20 +30,6 @@ detected. All other datatypes must be mapped explicitly.
Besides the options listed below, dynamic field mapping rules can be further
customised with <<dynamic-templates,`dynamic_templates`>>.
-[[mapping-limit-settings]]
-==== Settings to prevent mappings explosion
-
-Two settings allow to control mapping explosion, in order to prevent adversary
-documents to create huge mappings through dynamic mappings for instance:
-
-`index.mapping.total_fields.limit`::
- The maximum number of fields in an index. The default value is `1000`.
-`index.mapping.depth.limit`::
- The maximum depth for a field, which is measured as the number of nested
- objects. For instance, if all fields are defined at the root object level,
- then the depth is `1`. If there is one object mapping, then the depth is
- `2`, etc. The default is `20`.
-
[[date-detection]]
==== Date detection
diff --git a/docs/reference/mapping/fields/all-field.asciidoc b/docs/reference/mapping/fields/all-field.asciidoc
index 70fac4ed06..aa957247a3 100644
--- a/docs/reference/mapping/fields/all-field.asciidoc
+++ b/docs/reference/mapping/fields/all-field.asciidoc
@@ -23,7 +23,7 @@ GET my_index/_search
{
"query": {
"match": {
- "_all": "john smith 1970"
+ "_all": "john smith new york"
}
}
}
@@ -72,7 +72,7 @@ GET _search
{
"query": {
"query_string": {
- "query": "john smith 1970"
+ "query": "john smith new york"
}
}
}
@@ -84,7 +84,7 @@ requests>> (which is rewritten to a `query_string` query internally):
[source,js]
--------------------------------
-GET _search?q=john+smith+1970
+GET _search?q=john+smith+new+york
--------------------------------
Other queries, such as the <<query-dsl-match-query,`match`>> and
diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc
index cf8c6398d2..815606fb7b 100644
--- a/docs/reference/mapping/fields/field-names-field.asciidoc
+++ b/docs/reference/mapping/fields/field-names-field.asciidoc
@@ -6,7 +6,7 @@ contains any value other than `null`. This field is used by the
<<query-dsl-exists-query,`exists`>> query to find documents that
either have or don't have any non-+null+ value for a particular field.
-The value of the `_field_name` field is accessible in queries and scripts:
+The value of the `_field_names` field is accessible in queries:
[source,js]
--------------------------
@@ -16,7 +16,7 @@ PUT my_index/my_type/1
"title": "This is a document"
}
-PUT my_index/my_type/1
+PUT my_index/my_type/2?refresh=true
{
"title": "This is another document",
"body": "This document has a body"
@@ -28,19 +28,10 @@ GET my_index/_search
"terms": {
"_field_names": [ "title" ] <1>
}
- },
- "script_fields": {
- "Field names": {
- "script": {
- "lang": "painless",
- "inline": "doc['_field_names']" <2>
- }
- }
}
}
--------------------------
// CONSOLE
-<1> Querying on the `_field_names` field (also see the <<query-dsl-exists-query,`exists`>> query)
-<2> Accessing the `_field_names` field in scripts
+<1> Querying on the `_field_names` field (also see the <<query-dsl-exists-query,`exists`>> query) \ No newline at end of file
diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc
index c640b56157..94c95a8bbe 100644
--- a/docs/reference/mapping/fields/id-field.asciidoc
+++ b/docs/reference/mapping/fields/id-field.asciidoc
@@ -19,7 +19,7 @@ PUT my_index/my_type/1
"text": "Document with ID 1"
}
-PUT my_index/my_type/2
+PUT my_index/my_type/2&refresh=true
{
"text": "Document with ID 2"
}
diff --git a/docs/reference/mapping/fields/index-field.asciidoc b/docs/reference/mapping/fields/index-field.asciidoc
index 599fedba62..cd285c4255 100644
--- a/docs/reference/mapping/fields/index-field.asciidoc
+++ b/docs/reference/mapping/fields/index-field.asciidoc
@@ -21,7 +21,7 @@ PUT index_1/my_type/1
"text": "Document in index 1"
}
-PUT index_2/my_type/2
+PUT index_2/my_type/2?refresh=true
{
"text": "Document in index 2"
}
diff --git a/docs/reference/mapping/fields/parent-field.asciidoc b/docs/reference/mapping/fields/parent-field.asciidoc
index 30b2e1a208..eddf9ef3a2 100644
--- a/docs/reference/mapping/fields/parent-field.asciidoc
+++ b/docs/reference/mapping/fields/parent-field.asciidoc
@@ -28,7 +28,7 @@ PUT my_index/my_child/2?parent=1 <3>
"text": "This is a child document"
}
-PUT my_index/my_child/3?parent=1 <3>
+PUT my_index/my_child/3?parent=1&refresh=true <3>
{
"text": "This is another child document"
}
@@ -103,7 +103,7 @@ GET my_index/_search
cannot be established between documents of the same type.
* The `_parent.type` setting can only point to a type that doesn't exist yet.
- This means that a type cannot become a parent type after it is has been
+ This means that a type cannot become a parent type after it has been
created.
* Parent and child documents must be indexed on the same shard. The `parent`
diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc
index c8a92de136..bbdef3370c 100644
--- a/docs/reference/mapping/fields/routing-field.asciidoc
+++ b/docs/reference/mapping/fields/routing-field.asciidoc
@@ -14,7 +14,7 @@ value per document. For instance:
[source,js]
------------------------------
-PUT my_index/my_type/1?routing=user1 <1>
+PUT my_index/my_type/1?routing=user1&refresh=true <1>
{
"title": "This is a document"
}
@@ -29,7 +29,7 @@ GET my_index/my_type/1?routing=user1 <2>
<<docs-get,getting>>, <<docs-delete,deleting>>, or <<docs-update,updating>>
the document.
-The value of the `_routing` field is accessible in queries and scripts:
+The value of the `_routing` field is accessible in queries:
[source,js]
--------------------------
@@ -39,22 +39,12 @@ GET my_index/_search
"terms": {
"_routing": [ "user1" ] <1>
}
- },
- "script_fields": {
- "Routing value": {
- "script": {
- "lang": "painless",
- "inline": "doc['_routing']" <2>
- }
- }
}
}
--------------------------
// CONSOLE
<1> Querying on the `_routing` field (also see the <<query-dsl-ids-query,`ids` query>>)
-<2> Accessing the `_routing` field in scripts
-
==== Searching with custom routing
diff --git a/docs/reference/mapping/fields/type-field.asciidoc b/docs/reference/mapping/fields/type-field.asciidoc
index fecc0143e9..a84b2508de 100644
--- a/docs/reference/mapping/fields/type-field.asciidoc
+++ b/docs/reference/mapping/fields/type-field.asciidoc
@@ -16,7 +16,7 @@ PUT my_index/type_1/1
"text": "Document with type 1"
}
-PUT my_index/type_2/2
+PUT my_index/type_2/2?refresh=true
{
"text": "Document with type 2"
}
diff --git a/docs/reference/mapping/fields/uid-field.asciidoc b/docs/reference/mapping/fields/uid-field.asciidoc
index 8209516064..bad9eb4768 100644
--- a/docs/reference/mapping/fields/uid-field.asciidoc
+++ b/docs/reference/mapping/fields/uid-field.asciidoc
@@ -16,7 +16,7 @@ PUT my_index/my_type/1
"text": "Document with ID 1"
}
-PUT my_index/my_type/2
+PUT my_index/my_type/2?refresh=true
{
"text": "Document with ID 2"
}
diff --git a/docs/reference/mapping/params.asciidoc b/docs/reference/mapping/params.asciidoc
index 5134ab6733..e7d2d7ac0c 100644
--- a/docs/reference/mapping/params.asciidoc
+++ b/docs/reference/mapping/params.asciidoc
@@ -15,15 +15,11 @@ The following mapping parameters are common to some or all field datatypes:
* <<dynamic,`dynamic`>>
* <<enabled,`enabled`>>
* <<fielddata,`fielddata`>>
-* <<geohash,`geohash`>>
-* <<geohash-precision,`geohash_precision`>>
-* <<geohash-prefix,`geohash_prefix`>>
* <<mapping-date-format,`format`>>
* <<ignore-above,`ignore_above`>>
* <<ignore-malformed,`ignore_malformed`>>
* <<include-in-all,`include_in_all`>>
* <<index-options,`index_options`>>
-* <<lat-lon,`lat_lon`>>
* <<mapping-index,`index`>>
* <<multi-fields,`fields`>>
* <<norms,`norms`>>
@@ -54,12 +50,6 @@ include::params/fielddata.asciidoc[]
include::params/format.asciidoc[]
-include::params/geohash.asciidoc[]
-
-include::params/geohash-precision.asciidoc[]
-
-include::params/geohash-prefix.asciidoc[]
-
include::params/ignore-above.asciidoc[]
include::params/ignore-malformed.asciidoc[]
@@ -70,8 +60,6 @@ include::params/index.asciidoc[]
include::params/index-options.asciidoc[]
-include::params/lat-lon.asciidoc[]
-
include::params/multi-fields.asciidoc[]
include::params/norms.asciidoc[]
diff --git a/docs/reference/mapping/params/geohash-precision.asciidoc b/docs/reference/mapping/params/geohash-precision.asciidoc
deleted file mode 100644
index 6e4e859d03..0000000000
--- a/docs/reference/mapping/params/geohash-precision.asciidoc
+++ /dev/null
@@ -1,60 +0,0 @@
-[[geohash-precision]]
-=== `geohash_precision`
-
-Geohashes are a form of lat/lon encoding which divides the earth up into
-a grid. Each cell in this grid is represented by a geohash string. Each
-cell in turn can be further subdivided into smaller cells which are
-represented by a longer string. So the longer the geohash, the smaller
-(and thus more accurate) the cell is.
-
-The `geohash_precision` setting controls the length of the geohash that is
-indexed when the <<geohash,`geohash`>> option is enabled, and the maximum
-geohash length when the <<geohash-prefix,`geohash_prefix`>> option is enabled.
-
-It accepts:
-
-* a number between 1 and 12 (default), which represents the length of the geohash.
-* a <<distance-units,distance>>, e.g. `1km`.
-
-If a distance is specified, it will be translated to the smallest
-geohash-length that will provide the requested resolution.
-
-For example:
-
-[source,js]
---------------------------------------------------
-PUT my_index
-{
- "mappings": {
- "my_type": {
- "properties": {
- "location": {
- "type": "geo_point",
- "geohash_prefix": true,
- "geohash_precision": 6 <1>
- }
- }
- }
- }
-}
-
-PUT my_index/my_type/1
-{
- "location": {
- "lat": 41.12,
- "lon": -71.34
- }
-}
-
-GET my_index/_search?fielddata_fields=location.geohash
-{
- "query": {
- "term": {
- "location.geohash": "drm3bt"
- }
- }
-}
-
---------------------------------------------------
-// CONSOLE
-<1> A `geohash_precision` of 6 equates to geohash cells of approximately 1.26km x 0.6km
diff --git a/docs/reference/mapping/params/geohash-prefix.asciidoc b/docs/reference/mapping/params/geohash-prefix.asciidoc
deleted file mode 100644
index 56f0b42ae8..0000000000
--- a/docs/reference/mapping/params/geohash-prefix.asciidoc
+++ /dev/null
@@ -1,64 +0,0 @@
-[[geohash-prefix]]
-=== `geohash_prefix`
-
-Geohashes are a form of lat/lon encoding which divides the earth up into
-a grid. Each cell in this grid is represented by a geohash string. Each
-cell in turn can be further subdivided into smaller cells which are
-represented by a longer string. So the longer the geohash, the smaller
-(and thus more accurate) the cell is.
-
-While the <<geohash,`geohash`>> option enables indexing the geohash that
-corresponds to the lat/lon point, at the specified
-<<geohash-precision,precision>>, the `geohash_prefix` option will also
-index all the enclosing cells as well.
-
-For instance, a geohash of `drm3btev3e86` will index all of the following
-terms: [ `d`, `dr`, `drm`, `drm3`, `drm3b`, `drm3bt`, `drm3bte`, `drm3btev`,
-`drm3btev3`, `drm3btev3e`, `drm3btev3e8`, `drm3btev3e86` ].
-
-The geohash prefixes can be used with the
-<<query-dsl-geohash-cell-query,`geohash_cell` query>> to find points within a
-particular geohash, or its neighbours:
-
-
-[source,js]
---------------------------------------------------
-PUT my_index
-{
- "mappings": {
- "my_type": {
- "properties": {
- "location": {
- "type": "geo_point",
- "geohash_prefix": true,
- "geohash_precision": 6
- }
- }
- }
- }
-}
-
-PUT my_index/my_type/1
-{
- "location": {
- "lat": 41.12,
- "lon": -71.34
- }
-}
-
-GET my_index/_search?fielddata_fields=location.geohash
-{
- "query": {
- "geohash_cell": {
- "location": {
- "lat": 41.02,
- "lon": -71.48
- },
- "precision": 4, <1>
- "neighbors": true <1>
- }
- }
-}
---------------------------------------------------
-// CONSOLE
-
diff --git a/docs/reference/mapping/params/geohash.asciidoc b/docs/reference/mapping/params/geohash.asciidoc
deleted file mode 100644
index d362d37343..0000000000
--- a/docs/reference/mapping/params/geohash.asciidoc
+++ /dev/null
@@ -1,70 +0,0 @@
-[[geohash]]
-=== `geohash`
-
-Geohashes are a form of lat/lon encoding which divides the earth up into
-a grid. Each cell in this grid is represented by a geohash string. Each
-cell in turn can be further subdivided into smaller cells which are
-represented by a longer string. So the longer the geohash, the smaller
-(and thus more accurate) the cell is.
-
-Because geohashes are just strings, they can be stored in an inverted
-index like any other string, which makes querying them very efficient.
-
-If you enable the `geohash` option, a `geohash` ``sub-field'' will be indexed
-as, eg `.geohash`. The length of the geohash is controlled by the
-<<geohash-precision,`geohash_precision`>> parameter.
-
-If the <<geohash-prefix,`geohash_prefix`>> option is enabled, the `geohash`
-option will be enabled automatically.
-
-For example:
-
-[source,js]
---------------------------------------------------
-PUT my_index
-{
- "mappings": {
- "my_type": {
- "properties": {
- "location": {
- "type": "geo_point", <1>
- "geohash": true
- }
- }
- }
- }
-}
-
-
-PUT my_index/my_type/1
-{
- "location": {
- "lat": 41.12,
- "lon": -71.34
- }
-}
-
-GET my_index/_search?fielddata_fields=location.geohash <2>
-{
- "query": {
- "prefix": {
- "location.geohash": "drm3b" <3>
- }
- }
-}
---------------------------------------------------
-// CONSOLE
-<1> A `location.geohash` field will be indexed for each geo-point.
-<2> The geohash can be retrieved with <<doc-values,`doc_values`>>.
-<3> A <<query-dsl-prefix-query,`prefix`>> query can find all geohashes which start with a particular prefix.
-
-[WARNING]
-============================================
-
-A `prefix` query on geohashes is expensive. Instead, consider using the
-<<geohash-prefix,`geohash_prefix`>> to pay the expense once at index time
-instead of on every query.
-
-============================================
-
-
diff --git a/docs/reference/mapping/params/lat-lon.asciidoc b/docs/reference/mapping/params/lat-lon.asciidoc
deleted file mode 100644
index 88c91c30d0..0000000000
--- a/docs/reference/mapping/params/lat-lon.asciidoc
+++ /dev/null
@@ -1,72 +0,0 @@
-[[lat-lon]]
-=== `lat_lon`
-
-deprecated[5.0.0, ????????]
-// https://github.com/elastic/elasticsearch/issues/19792
-
-<<geo-queries,Geo-queries>> are usually performed by plugging the value of
-each <<geo-point,`geo_point`>> field into a formula to determine whether it
-falls into the required area or not. Unlike most queries, the inverted index
-is not involved.
-
-Setting `lat_lon` to `true` causes the latitude and longitude values to be
-indexed as numeric fields (called `.lat` and `.lon`). These fields can be used
-by the <<query-dsl-geo-bounding-box-query,`geo_bounding_box`>> and
-<<query-dsl-geo-distance-query,`geo_distance`>> queries instead of
-performing in-memory calculations. So this mapping:
-
-[source,js]
---------------------------------------------------
-PUT my_index
-{
- "mappings": {
- "my_type": {
- "properties": {
- "location": {
- "type": "geo_point",
- "lat_lon": true <1>
- }
- }
- }
- }
-}
---------------------------------------------------
-// TEST[warning:geo_point lat_lon parameter is deprecated and will be removed in the next major release]
-<1> Setting `lat_lon` to true indexes the geo-point in the `location.lat` and `location.lon` fields.
-
-Allows these actions:
-
-[source,js]
---------------------------------------------------
-PUT my_index/my_type/1?refresh
-{
- "location": {
- "lat": 41.12,
- "lon": -71.34
- }
-}
-
-
-GET my_index/_search
-{
- "query": {
- "geo_distance": {
- "location": {
- "lat": 41,
- "lon": -71
- },
- "distance": "50km",
- "optimize_bbox": "indexed" <1>
- }
- }
-}
---------------------------------------------------
-// CONSOLE
-// TEST[continued]
-<1> The `indexed` option tells the geo-distance query to use the inverted index instead of the in-memory calculation.
-
-Whether the in-memory or indexed operation performs better depends both on
-your dataset and on the types of queries that you are running.
-
-NOTE: The `lat_lon` option only makes sense for single-value `geo_point`
-fields. It will not work with arrays of geo-points.
diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc
index a05723cd19..8408dc89ae 100644
--- a/docs/reference/mapping/types.asciidoc
+++ b/docs/reference/mapping/types.asciidoc
@@ -35,12 +35,6 @@ string:: <<text,`text`>> and <<keyword,`keyword`>>
<<token-count>>:: `token_count` to count the number of tokens in a string
{plugins}/mapper-size.html[`mapper-murmur3`]:: `murmur3` to compute hashes of values at index-time and store them in the index
-Attachment datatype::
-
- See the {plugins}/mapper-attachments.html[`mapper-attachments`] plugin
- which supports indexing `attachments` like Microsoft Office formats, Open
- Document formats, ePub, HTML, etc. into an `attachment` datatype.
-
<<percolator>>:: Accepts queries from the query-dsl
[float]
diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc
index cf2ae13a99..a839117c7c 100644
--- a/docs/reference/mapping/types/geo-point.asciidoc
+++ b/docs/reference/mapping/types/geo-point.asciidoc
@@ -5,8 +5,7 @@ Fields of type `geo_point` accept latitude-longitude pairs, which can be used:
* to find geo-points within a <<query-dsl-geo-bounding-box-query,bounding box>>,
within a certain <<query-dsl-geo-distance-query,distance>> of a central point,
- within a <<query-dsl-geo-polygon-query,polygon>>, or within a
- <<query-dsl-geohash-cell-query,geohash>> cell.
+ or within a <<query-dsl-geo-polygon-query,polygon>>.
* to aggregate documents by <<search-aggregations-bucket-geohashgrid-aggregation,geographically>>
or by <<search-aggregations-bucket-geodistance-aggregation,distance>> from a central point.
* to integrate distance into a document's <<query-dsl-function-score-query,relevance score>>.
@@ -101,31 +100,11 @@ The following parameters are accepted by `geo_point` fields:
[horizontal]
-<<geohash,`geohash`>>::
-
- Should the geo-point also be indexed as a geohash in the `.geohash`
- sub-field? Defaults to `false`, unless `geohash_prefix` is `true`.
-
-<<geohash-precision,`geohash_precision`>>::
-
- The maximum length of the geohash to use for the `geohash` and
- `geohash_prefix` options.
-
-<<geohash-prefix,`geohash_prefix`>>::
-
- Should the geo-point also be indexed as a geohash plus all its prefixes?
- Defaults to `false`.
-
<<ignore-malformed,`ignore_malformed`>>::
If `true`, malformed geo-points are ignored. If `false` (default),
malformed geo-points throw an exception and reject the whole document.
-<<lat-lon,`lat_lon`>>::
-
- Should the geo-point also be indexed as `.lat` and `.lon` sub-fields?
- Accepts `true` and `false` (default).
-
==== Using geo-points in scripts
When accessing the value of a geo-point in a script, the value is returned as
diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc
index 2ae1008c01..fa260bbeff 100644
--- a/docs/reference/mapping/types/keyword.asciidoc
+++ b/docs/reference/mapping/types/keyword.asciidoc
@@ -109,3 +109,8 @@ The following parameters are accepted by `keyword` fields:
Which scoring algorithm or _similarity_ should be used. Defaults
to `classic`, which uses TF/IDF.
+NOTE: Indexes imported from 2.x do not support `keyword`. Instead, Elasticsearch
+will attempt to downgrade `keyword` into `string`. This allows you to merge
+modern mappings with legacy mappings. Long-lived indexes will have to be
+recreated before upgrading to 6.x, but the mapping downgrade gives you the
+opportunity to do the recreation on your own schedule.
diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc
index 8850763cdc..91bbf98c92 100644
--- a/docs/reference/mapping/types/nested.asciidoc
+++ b/docs/reference/mapping/types/nested.asciidoc
@@ -205,5 +205,5 @@ phase. Instead, highlighting needs to be performed via
Indexing a document with 100 nested fields actually indexes 101 documents as each nested
document is indexed as a separate document. To safeguard against ill-defined mappings
-the number of nested fields that can be defined per index has been limited to 50. This
-default limit can be changed with the index setting `index.mapping.nested_fields.limit`.
+the number of nested fields that can be defined per index has been limited to 50. See
+<<mapping-limit-settings>>.
diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc
index ca8c8386e9..65f0d72550 100644
--- a/docs/reference/mapping/types/percolator.asciidoc
+++ b/docs/reference/mapping/types/percolator.asciidoc
@@ -82,4 +82,7 @@ time (using `now`).
There are a number of queries that fetch data via a get call during query parsing. For example the `terms` query when
using terms lookup, `template` query when using indexed scripts and `geo_shape` when using pre-indexed shapes. When these
queries are indexed by the `percolator` field type then the get call is executed once. So each time the `percolator`
-query evaluates these queries, the fetches terms, shapes etc. as the were upon index time will be used. \ No newline at end of file
+query evaluates these queries, the terms, shapes, etc. that were fetched at index time will be used. It is important
+to note that this fetching happens each time the percolator query gets indexed, on both primary and replica shards,
+so the terms that are actually indexed can differ between shard copies if the source index changed while indexing. \ No newline at end of file
diff --git a/docs/reference/mapping/types/string.asciidoc b/docs/reference/mapping/types/string.asciidoc
index 88ca4e1d92..b6a5fef86e 100644
--- a/docs/reference/mapping/types/string.asciidoc
+++ b/docs/reference/mapping/types/string.asciidoc
@@ -1,4 +1,18 @@
[[string]]
=== String datatype
-NOTE: The `string` field has been removed in favor of the `text` and `keyword` fields.
+The `string` field is unsupported for indexes created in 5.x; it has been
+superseded by the `text` and `keyword` fields. Attempting to create a `string`
+field in an index created in 5.x will cause Elasticsearch to attempt to upgrade
+the `string` into the appropriate `text` or `keyword` field. It will return an
+HTTP `Warning` header telling you that `string` is deprecated. This upgrade
+process isn't always perfect because there are some combinations of features
+that are supported by `string` but not by `text` or `keyword`. For that reason
+it is better to use `text` or `keyword` directly.
+
+Indexes imported from 2.x *only* support `string` and not `text` or `keyword`.
+To ease the migration from 2.x, Elasticsearch will downgrade `text` and
+`keyword` mappings applied to indexes imported from 2.x into `string`. While
+long-lived indexes will eventually need to be recreated against 5.x before
+upgrading to 6.x, this downgrading smooths the process until you find time for
+the recreation.
diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc
index 7e51f4d672..49d7618ae8 100644
--- a/docs/reference/mapping/types/text.asciidoc
+++ b/docs/reference/mapping/types/text.asciidoc
@@ -1,7 +1,7 @@
[[text]]
=== Text datatype
-A field to index full-text values, such as the body of on email or the
+A field to index full-text values, such as the body of an email or the
description of a product. These fields are `analyzed`, that is they are passed through an
<<analysis,analyzer>> to convert the string into a list of individual terms
before being indexed. The analysis process allows Elasticsearch to search for
@@ -135,3 +135,9 @@ The following parameters are accepted by `text` fields:
Whether term vectors should be stored for an <<mapping-index,`analyzed`>>
field. Defaults to `no`.
+
+NOTE: Indexes imported from 2.x do not support `text`. Instead, Elasticsearch
+will attempt to downgrade `text` into `string`. This allows you to merge
+modern mappings with legacy mappings. Long-lived indexes will have to be
+recreated before upgrading to 6.x, but the mapping downgrade gives you the
+opportunity to do the recreation on your own schedule.
diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc
index 0dda43da71..daef700fc4 100644
--- a/docs/reference/migration/index.asciidoc
+++ b/docs/reference/migration/index.asciidoc
@@ -8,15 +8,15 @@ your application from one version of Elasticsearch to another.
As a general rule:
-* Migration between minor versions -- e.g. `5.x` to `5.y` -- can be
+* Migration between minor versions -- e.g. `6.x` to `6.y` -- can be
performed by <<rolling-upgrades,upgrading one node at a time>>.
-* Migration between consecutive major versions -- e.g. `2.x` to `5.x` --
+* Migration between consecutive major versions -- e.g. `5.x` to `6.x` --
requires a <<restart-upgrade,full cluster restart>>.
-* Migration between non-consecutive major versions -- e.g. `1.x` to `5.x` --
+* Migration between non-consecutive major versions -- e.g. `2.x` to `6.x` --
is not supported.
See <<setup-upgrade>> for more info.
--
-include::migrate_5_0.asciidoc[]
+include::migrate_6_0.asciidoc[]
diff --git a/docs/reference/migration/migrate_5_0.asciidoc b/docs/reference/migration/migrate_5_0.asciidoc
deleted file mode 100644
index 2449406928..0000000000
--- a/docs/reference/migration/migrate_5_0.asciidoc
+++ /dev/null
@@ -1,129 +0,0 @@
-[[breaking-changes-5.0]]
-== Breaking changes in 5.0
-
-This section discusses the changes that you need to be aware of when migrating
-your application to Elasticsearch 5.0.
-
-[IMPORTANT]
-.Known networking bug in 5.0.0-alpha5
-======================================================
-
-There is a bug in the new Netty4 implementation in this release which affects any REST request with
-a body that is sent in two requests, the first with an `Expect: 100-continue` header. This bug will
-manifest with an exception similar to the following:
-
-[source,txt]
-----
-[WARN ][http.netty4] [wtOV9Vb] caught exception while handling client http traffic, closing connection [id: 0x1320b717, L:/0:0:0:0:0:0:0:1:9200 - R:/0:0:0:0:0:0:0:1:54732]
-java.lang.UnsupportedOperationException: unsupported message type: DefaultFullHttpResponse (expected: ByteBuf, FileRegion)
-----
-
-This is due to incorrect handling of the `Expect` HTTP header, and it can be
-worked around in one of three ways:
-
-* Use a client which does not add `Expect` headers (including the official clients).
-
-* Pass a blank `Except` header, e.g.
-+
-[source,sh]
-----
-curl -H 'Expect:' ...
-----
-
-* Use Netty3 for the HTTP layer by passing the following setting at startup:
-+
-[source,sh]
-----
-./bin/elasticsearch -Ehttp.type=netty3
-----
-
-======================================================
-
-[float]
-[[migration-plugin]]
-=== Migration Plugin
-
-The https://github.com/elastic/elasticsearch-migration/blob/2.x/README.asciidoc[`elasticsearch-migration` plugin]
-(compatible with Elasticsearch 2.3.0 and above) will help you to find issues
-that need to be addressed when upgrading to Elasticsearch 5.0.
-
-[float]
-=== Indices created before 5.0
-
-Elasticsearch 5.0 can read indices created in version 2.0 or above. An
-Elasticsearch 5.0 node will not start in the presence of indices created in a
-version of Elasticsearch before 2.0.
-
-[IMPORTANT]
-.Reindex indices from Elasticseach 1.x or before
-=========================================
-
-Indices created in Elasticsearch 1.x or before will need to be reindexed with
-Elasticsearch 2.x in order to be readable by Elasticsearch 5.x. It is not
-sufficient to use the <<indices-upgrade,`upgrade`>> API. The easiest
-way to reindex old indices is to upgrade to Elasticsearch 2.3 or later and to use the
-`reindex` API, or the reindex UI provided by the <<migration-plugin,Migration Plugin>>.
-
-=========================================
-
-The first time Elasticsearch 5.0 starts, it will automatically rename index
-folders to use the index UUID instead of the index name. If you are using
-<<indices-shadow-replicas,shadow replicas>> with shared data folders, first
-start a single node with access to all data folders, and let it rename all
-index folders before starting other nodes in the cluster.
-
-[float]
-=== Also see:
-
-* <<breaking_50_search_changes>>
-* <<breaking_50_mapping_changes>>
-* <<breaking_50_percolator>>
-* <<breaking_50_suggester>>
-* <<breaking_50_index_apis>>
-* <<breaking_50_document_api_changes>>
-* <<breaking_50_settings_changes>>
-* <<breaking_50_allocation>>
-* <<breaking_50_http_changes>>
-* <<breaking_50_rest_api_changes>>
-* <<breaking_50_cat_api>>
-* <<breaking_50_java_api_changes>>
-* <<breaking_50_packaging>>
-* <<breaking_50_plugins>>
-* <<breaking_50_fs>>
-* <<breaking_50_aggregations_changes>>
-* <<breaking_50_scripting>>
-
-
-include::migrate_5_0/search.asciidoc[]
-
-include::migrate_5_0/mapping.asciidoc[]
-
-include::migrate_5_0/percolator.asciidoc[]
-
-include::migrate_5_0/suggest.asciidoc[]
-
-include::migrate_5_0/index-apis.asciidoc[]
-
-include::migrate_5_0/docs.asciidoc[]
-
-include::migrate_5_0/settings.asciidoc[]
-
-include::migrate_5_0/allocation.asciidoc[]
-
-include::migrate_5_0/http.asciidoc[]
-
-include::migrate_5_0/rest.asciidoc[]
-
-include::migrate_5_0/cat.asciidoc[]
-
-include::migrate_5_0/java.asciidoc[]
-
-include::migrate_5_0/packaging.asciidoc[]
-
-include::migrate_5_0/plugins.asciidoc[]
-
-include::migrate_5_0/fs.asciidoc[]
-
-include::migrate_5_0/aggregations.asciidoc[]
-
-include::migrate_5_0/scripting.asciidoc[]
diff --git a/docs/reference/migration/migrate_5_0/aggregations.asciidoc b/docs/reference/migration/migrate_5_0/aggregations.asciidoc
deleted file mode 100644
index 287da1efb9..0000000000
--- a/docs/reference/migration/migrate_5_0/aggregations.asciidoc
+++ /dev/null
@@ -1,33 +0,0 @@
-[[breaking_50_aggregations_changes]]
-=== Aggregation changes
-
-==== Significant terms on numeric fields
-
-Numeric fields have been refactored to use a different data structure that
-performs better for range queries. However, since this data structure does
-not record document frequencies, numeric fields need to fall back to running
-queries in order to estimate the number of matching documents in the
-background set, which may incur a performance degradation.
-
-It is recommended to use <<keyword,`keyword`>> fields instead, either directly
-or through a <<multi-fields,multi-field>> if the numeric representation is
-still needed for sorting, range queries or numeric aggregations like
-<<search-aggregations-metrics-stats-aggregation,`stats` aggregations>>.
-
-==== `ip_range` aggregations
-
-Now that Elasticsearch supports `ipv6`, `ip` addresses are encoded in the index
-using a binary representation rather than a numeric representation. As a
-consequence, the output of `ip_range` aggregations does not give numeric values
-for `from` and `to` anymore.
-
-==== `size: 0` on Terms, Significant Terms and Geohash Grid Aggregations
-
-`size: 0` is no longer valid for the terms, significant terms and geohash grid
-aggregations. Instead a size should be explicitly specified with a number greater
-than zero.
-
-==== Fractional time values
-
-Fractional time values (e.g., 0.5s) are no longer supported. For example, this means when setting
-date histogram intervals "1.5h" will be rejected and should instead be input as "90m".
diff --git a/docs/reference/migration/migrate_5_0/allocation.asciidoc b/docs/reference/migration/migrate_5_0/allocation.asciidoc
deleted file mode 100644
index d7affd8ff9..0000000000
--- a/docs/reference/migration/migrate_5_0/allocation.asciidoc
+++ /dev/null
@@ -1,59 +0,0 @@
-[[breaking_50_allocation]]
-=== Allocation changes
-
-==== Primary shard allocation
-
-Previously, primary shards were only assigned if a quorum of shard copies were
-found (configurable using `index.recovery.initial_shards`, now deprecated). In
-case where a primary had only a single replica, quorum was defined to be a
-single shard. This meant that any shard copy of an index with replication
-factor 1 could become primary, even it was a stale copy of the data on disk.
-This is now fixed thanks to shard allocation IDs.
-
-Allocation IDs assign unique identifiers to shard copies. This allows the
-cluster to differentiate between multiple copies of the same data and track
-which shards have been active so that, after a cluster restart, only shard
-copies containing the most recent data can become primaries.
-
-==== Indices Shard Stores command
-
-By using allocation IDs instead of version numbers to identify shard copies
-for primary shard allocation, the former versioning scheme has become
-obsolete. This is reflected in the
-<<indices-shards-stores,Indices Shard Stores API>>.
-
-A new `allocation_id` field replaces the former `version` field in the result
-of the Indices Shard Stores command. This field is available for all shard
-copies that have been either created with the current version of Elasticsearch
-or have been active in a cluster running a current version of Elasticsearch.
-For legacy shard copies that have not been active in a current version of
-Elasticsearch, a `legacy_version` field is available instead (equivalent to
-the former `version` field).
-
-==== Reroute commands
-
-The reroute command `allocate` has been split into two distinct commands
-`allocate_replica` and `allocate_empty_primary`. This was done as we
-introduced a new `allocate_stale_primary` command. The new `allocate_replica`
-command corresponds to the old `allocate` command with `allow_primary` set to
-false. The new `allocate_empty_primary` command corresponds to the old
-`allocate` command with `allow_primary` set to true.
-
-==== Custom Reroute Commands
-
-Elasticsearch no longer supports plugins registering custom allocation
-commands. It was unused and hopefully unneeded.
-
-==== `index.shared_filesystem.recover_on_any_node` changes
-
-The behavior of `index.shared_filesystem.recover_on_any_node: true` has been
-changed. Previously, in the case where no shard copies could be found, an
-arbitrary node was chosen by potentially ignoring allocation deciders. Now, we
-take balancing into account but don't assign the shard if the allocation
-deciders are not satisfied.
-
-The behavior has also changed in the case where shard copies can be found.
-Previously, a node not holding the shard copy was chosen if none of the nodes
-holding shard copies were satisfying the allocation deciders. Now, the shard
-will be assigned to a node having a shard copy, even if none of the nodes
-holding a shard copy satisfy the allocation deciders.
diff --git a/docs/reference/migration/migrate_5_0/cat.asciidoc b/docs/reference/migration/migrate_5_0/cat.asciidoc
deleted file mode 100644
index 6294b23310..0000000000
--- a/docs/reference/migration/migrate_5_0/cat.asciidoc
+++ /dev/null
@@ -1,47 +0,0 @@
-[[breaking_50_cat_api]]
-=== CAT API changes
-
-==== Use Accept header for specifying response media type
-
-Previous versions of Elasticsearch accepted the Content-type header
-field for controlling the media type of the response in the cat API.
-This is in opposition to the HTTP spec which specifies the Accept
-header field for this purpose. Elasticsearch now uses the Accept header
-field and support for using the Content-Type header field for this
-purpose has been removed.
-
-==== Host field removed from the cat nodes API
-
-The `host` field has been removed from the cat nodes API as its value
-is always equal to the `ip` field. The `name` field is available in the
-cat nodes API and should be used instead of the `host` field.
-
-==== Changes to cat recovery API
-
-The fields `bytes_recovered` and `files_recovered` have been added to
-the cat recovery API. These fields, respectively, indicate the total
-number of bytes and files that have been recovered.
-
-The fields `total_files` and `total_bytes` have been renamed to
-`files_total` and `bytes_total`, respectively.
-
-Additionally, the field `translog` has been renamed to
-`translog_ops_recovered`, the field `translog_total` to
-`translog_ops` and the field `translog_percent` to
-`translog_ops_percent`. The short aliases for these fields are `tor`,
-`to`, and `top`, respectively.
-
-==== Changes to cat nodes API
-
-The cat nodes endpoint returns `m` for master eligible, `d` for data,
-and `i` for ingest. A node with no explicit roles will be a coordinating
-only node and marked with `-`. A node can have multiple roles. The
-master column has been adapted to return only whether a node is the
-current master (`*`) or not (`-`).
-
-==== Changes to cat field data API
-
-The cat field data endpoint adds a row per field instead of a column per field.
-
-The `total` field has been removed from the field data API. Total field data usage per node
-can be got by cat nodes API.
diff --git a/docs/reference/migration/migrate_5_0/docs.asciidoc b/docs/reference/migration/migrate_5_0/docs.asciidoc
deleted file mode 100644
index 104f047056..0000000000
--- a/docs/reference/migration/migrate_5_0/docs.asciidoc
+++ /dev/null
@@ -1,57 +0,0 @@
-[[breaking_50_document_api_changes]]
-=== Document API changes
-
-==== `?refresh` no longer supports truthy and falsy values
-The `?refresh` request parameter used to accept any value other than `false`,
-`0`, `off`, and `no` to mean "make the changes from this request visible for
-search immediately." Now it only accepts `?refresh` and `?refresh=true` to
-mean that. You can set it to `?refresh=false` and the request will take no
-refresh-related action. The same is true if you leave `refresh` off of the
-url entirely. If you add `?refresh=wait_for` Elasticsearch will wait for the
-changes to become visible before replying to the request but won't take any
-immediate refresh related action. See <<docs-refresh>>.
-
-==== `created` field deprecated in the Index API
-
-The `created` field has been deprecated in the Index API. It now returns
-`operation`, returning `"operation": "create"` when it created a document and
-`"operation": "index"` when it updated the document. This is also true for
-`index` bulk operations.
-
-==== `found` field deprecated in the Delete API
-
-The `found` field has been deprecated in the Delete API. It now returns
-`operation`, returning `"operation": "deleted"` when it deleted a document and
-`"operation": "noop"` when it didn't found the document. This is also true for
-`index` bulk operations.
-
-==== Reindex and Update By Query
-Before 5.0.0 `_reindex` and `_update_by_query` only retried bulk failures so
-they used the following response format:
-
-[source,js]
-----------------------
-{
- ...
- "retries": 10
- ...
-}
-----------------------
-
-Where `retries` counts the number of bulk retries. Now they retry on search
-failures as well and use this response format:
-
-[source,js]
-----------------------
-{
- ...
- "retries": {
- "bulk": 10,
- "search": 1
- }
- ...
-}
-----------------------
-
-Where `bulk` counts the number of bulk retries and `search` counts the number
-of search retries.
diff --git a/docs/reference/migration/migrate_5_0/fs.asciidoc b/docs/reference/migration/migrate_5_0/fs.asciidoc
deleted file mode 100644
index 42c8b4ddce..0000000000
--- a/docs/reference/migration/migrate_5_0/fs.asciidoc
+++ /dev/null
@@ -1,31 +0,0 @@
-[[breaking_50_fs]]
-=== Filesystem related changes
-
-Only a subset of index files were open with `mmap` on Elasticsearch 2.x. As of
-Elasticsearch 5.0, all index files will be open with `mmap` on 64-bit systems.
-While this may increase the amount of virtual memory used by Elasticsearch,
-there is nothing to worry about since this is only address space consumption
-and the actual memory usage of Elasticsearch will stay similar to what it was
-in 2.x. See http://blog.thetaphi.de/2012/07/use-lucenes-mmapdirectory-on-64bit.html
-for more information.
-
-=== Path to data on disk
-
-In prior versions of Elasticsearch, the `path.data` directory included a folder
-for the cluster name, so that data was in a folder such as
-`$DATA_DIR/$CLUSTER_NAME/nodes/$nodeOrdinal`. In 5.0 the cluster name as a
-directory is deprecated. Data will now be stored in
-`$DATA_DIR/nodes/$nodeOrdinal` if there is no existing data. Upon startup,
-Elasticsearch will check to see if the cluster folder exists and has data, and
-will read from it if necessary. In Elasticsearch 6.0 this backwards-compatible
-behavior will be removed.
-
-If you are using a multi-cluster setup with both instances of Elasticsearch
-pointing to the same data path, you will need to add the cluster name to the
-data path so that different clusters do not overwrite data.
-
-==== Local files
-
-Prior to 5.0, nodes that were marked with both `node.data: false` and `node.master: false` (or the now removed `node.client: true`)
-didn't write any files or folder to disk. 5.x added persistent node ids, requiring nodes to store that information. As such, all
-node types will write a small state file to their data folders. \ No newline at end of file
diff --git a/docs/reference/migration/migrate_5_0/http.asciidoc b/docs/reference/migration/migrate_5_0/http.asciidoc
deleted file mode 100644
index 68a3f2841f..0000000000
--- a/docs/reference/migration/migrate_5_0/http.asciidoc
+++ /dev/null
@@ -1,9 +0,0 @@
-[[breaking_50_http_changes]]
-=== HTTP changes
-
-==== Compressed HTTP requests are always accepted
-
-Before 5.0, Elasticsearch accepted compressed HTTP requests only if the setting
- `http.compressed` was set to `true`. Elasticsearch accepts compressed requests
- now but will continue to send compressed responses only if `http.compressed`
- is set to `true`. \ No newline at end of file
diff --git a/docs/reference/migration/migrate_5_0/index-apis.asciidoc b/docs/reference/migration/migrate_5_0/index-apis.asciidoc
deleted file mode 100644
index 599b1681ab..0000000000
--- a/docs/reference/migration/migrate_5_0/index-apis.asciidoc
+++ /dev/null
@@ -1,51 +0,0 @@
-[[breaking_50_index_apis]]
-=== Index APIs changes
-
-==== Closing / deleting indices while running snapshot
-
-In previous versions of Elasticsearch, closing or deleting an index during a
-full snapshot would make the snapshot fail. In 5.0, the close/delete index
-request will fail instead. The behavior for partial snapshots remains
-unchanged: Closing or deleting an index during a partial snapshot is still
-possible. The snapshot result is then marked as partial.
-
-==== Warmers
-
-Thanks to several changes like doc values by default and disk-based norms,
-warmers are no longer useful. As a consequence, warmers and the warmer API
-have been removed: it is no longer possible to register queries that will run
-before a new IndexSearcher is published.
-
-Don't worry if you have warmers defined on your indices, they will simply be
-ignored when upgrading to 5.0.
-
-==== System CPU stats
-
-The recent CPU usage (as a percent) has been added to the OS stats
-reported under the node stats API and the cat nodes API. The breaking
-change here is that there is a new object in the `os` object in the node
-stats response. This object is called `cpu` and includes `percent` and
-`load_average` as fields. This moves the `load_average` field that was
-previously a top-level field in the `os` object to the `cpu` object. The
-format of the `load_average` field has changed to an object with fields
-`1m`, `5m`, and `15m` representing the one-minute, five-minute and
-fifteen-minute loads respectively. If any of these fields are not present,
-it indicates that the corresponding value is not available.
-
-In the cat nodes API response, the `cpu` field is output by default. The
-previous `load` field has been removed and is replaced by `load_1m`,
-`load_5m`, and `load_15m` which represent the one-minute, five-minute
-and fifteen-minute loads respectively. The field will be null if the
-corresponding value is not available.
-
-Finally, the API for `org.elasticsearch.monitor.os.OsStats` has
-changed. The `getLoadAverage` method has been removed. The value for
-this can now be obtained from `OsStats.Cpu#getLoadAverage` but it is no
-longer a double and is instead an object encapsulating the one-minute,
-five-minute and fifteen-minute load averages. Additionally, the recent
-CPU usage can be obtained from `OsStats.Cpu#getPercent`.
-
-==== Suggest stats
-
-Suggest stats exposed through `suggest` in indices stats has been merged
-with `search` stats. `suggest` stats is exposed as part of `search` stats.
diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc
deleted file mode 100644
index 83a235a183..0000000000
--- a/docs/reference/migration/migrate_5_0/java.asciidoc
+++ /dev/null
@@ -1,402 +0,0 @@
-
-
-
-[[breaking_50_java_api_changes]]
-=== Java API changes
-
-==== Transport client has been moved
-
-The Java transport client has been moved to its own module which can be referenced using:
-
-[source,xml]
------
-<dependency>
- <groupId>org.elasticsearch.client</groupId>
- <artifactId>transport</artifactId>
- <version>5.0.0-alpha5</version>
-</dependency>
------
-
-The transport client is now created using the following snippet:
-
-[source,java]
------
-TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)
- .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host1"), 9300))
- .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host2"), 9300));
------
-
-For more information please see the {javaclient}java-api.html[Java client documentation]
-
-==== Count api has been removed
-
-The deprecated count api has been removed from the Java api, use the search api instead and set size to 0.
-
-The following call
-
-[source,java]
------
-client.prepareCount(indices).setQuery(query).get();
------
-
-can be replaced with
-
-[source,java]
------
-client.prepareSearch(indices).setSource(new SearchSourceBuilder().size(0).query(query)).get();
------
-
-==== Suggest api has been removed
-
-The suggest api has been removed from the Java api, use the suggest option in search api, it has been optimized
-for suggest-only request.
-
-The following call
-
-[source,java]
------
-client.prepareSuggest(indices).addSuggestion("foo", SuggestBuilders.completionSuggestion("field").text("s")).get();
------
-
-can be replaced with
-
-[source,java]
------
-client.prepareSearch(indices).suggest(new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion("field").text("s"))).get();
------
-
-==== Elasticsearch will no longer detect logging implementations
-
-Elasticsearch now logs only to log4j 1.2. Previously if log4j wasn't on the
-classpath it made some effort to degrade to slf4j or java.util.logging. Now it
-will fail to work without the log4j 1.2 api. The log4j-over-slf4j bridge ought
-to work when using the java client, as should log4j 2's log4j-1.2-api. The
-Elasticsearch server now only supports log4j as configured by `logging.yml`
-and will fail if log4j isn't present.
-
-==== Groovy dependencies
-
-In previous versions of Elasticsearch, the Groovy scripting capabilities
-depended on the `org.codehaus.groovy:groovy-all` artifact. In addition
-to pulling in the Groovy language, this pulls in a very large set of
-functionality, none of which is needed for scripting within
-Elasticsearch. Aside from the inherent difficulties in managing such a
-large set of dependencies, this also increases the surface area for
-security issues. This dependency has been reduced to the core Groovy
-language `org.codehaus.groovy:groovy` artifact.
-
-==== DocumentAlreadyExistsException removed
-
-`DocumentAlreadyExistsException` is removed and a `VersionConflictException` is thrown instead (with a better
-error description). This will influence code that use the `IndexRequest.opType()` or `IndexRequest.create()`
-to index a document only if it doesn't already exist.
-
-==== writeConsistencyLevel removed on write requests
-
-In previous versions of Elasticsearch, the various write requests had a
-`setWriteConsistencyLevel` method to set the shard consistency level for
-write operations. However, the semantics of write consistency were ambiguous
-as this is just a pre-operation check to ensure the specified number of
-shards were available before the operation commenced. The write consistency
-level did not guarantee that the data would be replicated to those number
-of copies by the time the operation finished. The `setWriteConsistencyLevel`
-method on these write requests has been changed to `setWaitForActiveShards`,
-which can take a numerical value up to the total number of shard copies or
-`ActiveShardCount.ALL` for all shard copies. The default is to just wait
-for the primary shard to be active before proceeding with the operation.
-See the section on <<index-wait-for-active-shards,wait for active shards>>
-for more details.
-
-This change affects `IndexRequest`, `IndexRequestBuilder`, `BulkRequest`,
-`BulkRequestBuilder`, `UpdateRequest`, `UpdateRequestBuilder`, `DeleteRequest`,
-and `DeleteRequestBuilder`.
-
-==== Changes to Query Builders
-
-===== BoostingQueryBuilder
-
-Removed setters for mandatory positive/negative query. Both arguments now have
-to be supplied at construction time already and have to be non-null.
-
-===== SpanContainingQueryBuilder
-
-Removed setters for mandatory big/little inner span queries. Both arguments now have
-to be supplied at construction time already and have to be non-null. Updated
-static factory methods in QueryBuilders accordingly.
-
-===== SpanOrQueryBuilder
-
-Making sure that query contains at least one clause by making initial clause mandatory
-in constructor.
-Renaming method to add clauses from `clause(SpanQueryBuilder)` to `addClause(SpanQueryBuilder)`.
-
-===== SpanNearQueryBuilder
-
-Removed setter for mandatory slop parameter, needs to be set in constructor now. Also
-making sure that query contains at least one clause by making initial clause mandatory
-in constructor. Updated the static factory methods in QueryBuilders accordingly.
-Renaming method to add clauses from `clause(SpanQueryBuilder)` to `addClause(SpanQueryBuilder)`.
-
-===== SpanNotQueryBuilder
-
-Removed setter for mandatory include/exclude span query clause, needs to be set in constructor now.
-Updated the static factory methods in QueryBuilders and tests accordingly.
-
-===== SpanWithinQueryBuilder
-
-Removed setters for mandatory big/little inner span queries. Both arguments now have
-to be supplied at construction time already and have to be non-null. Updated
-static factory methods in QueryBuilders accordingly.
-
-===== WrapperQueryBuilder
-
-Removed `wrapperQueryBuilder(byte[] source, int offset, int length)`. Instead simply
-use `wrapperQueryBuilder(byte[] source)`. Updated the static factory methods in
-QueryBuilders accordingly.
-
-===== QueryStringQueryBuilder
-
-Removed ability to pass in boost value using `field(String field)` method in form e.g. `field^2`.
-Use the `field(String, float)` method instead.
-
-===== Operator
-
-Removed the enums called `Operator` from `MatchQueryBuilder`, `QueryStringQueryBuilder`,
-`SimpleQueryStringBuilder`, and `CommonTermsQueryBuilder` in favour of using the enum
-defined in `org.elasticsearch.index.query.Operator` in an effort to consolidate the
-codebase and avoid duplication.
-
-===== queryName and boost support
-
-Support for `queryName` and `boost` has been streamlined to all of the queries. That is
-a breaking change till queries get sent over the network as serialized json rather
-than in `Streamable` format. In fact whenever additional fields are added to the json
-representation of the query, older nodes might throw error when they find unknown fields.
-
-===== InnerHitsBuilder
-
-InnerHitsBuilder now has a dedicated addParentChildInnerHits and addNestedInnerHits methods
-to differentiate between inner hits for nested vs. parent / child documents. This change
-makes the type / path parameter mandatory.
-
-===== MatchQueryBuilder
-
-Moving MatchQueryBuilder.Type and MatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.Type.
-Also reusing new Operator enum.
-
-===== MoreLikeThisQueryBuilder
-
-Removed `MoreLikeThisQueryBuilder.Item#id(String id)`, `Item#doc(BytesReference doc)`,
-`Item#doc(XContentBuilder doc)`. Use provided constructors instead.
-
-Removed `MoreLikeThisQueryBuilder#addLike` in favor of texts and/or items being provided
-at construction time. Using arrays there instead of lists now.
-
-Removed `MoreLikeThisQueryBuilder#addUnlike` in favor of the `unlike` methods, which now
-take arrays as arguments rather than the lists used before.
-
-The deprecated `docs(Item... docs)`, `ignoreLike(Item... docs)`,
-`ignoreLike(String... likeText)`, `addItem(Item... likeItems)` have been removed.
-
-===== GeoDistanceQueryBuilder
-
-Removed the individual setters for the `lon()` and `lat()` values; both values must now be
-set together using `point(lon, lat)`.
-
-===== GeoDistanceRangeQueryBuilder
-
-Removed the setters for `to(Object ...)` and `from(Object ...)` in favour of the only two allowed
-input types (String, Number). Removed the setters for the center point (`point()`, `geohash()`)
-because the parameter is mandatory and must already be set in the constructor.
-Also removed the setters for `lt()`, `lte()`, `gt()` and `gte()`, since they can all be replaced by
-equivalent calls to `to()`/`from()` and `includeLower()`/`includeUpper()`.
-
-===== GeoPolygonQueryBuilder
-
-The shell of the polygon must now be specified in the constructor instead of being added
-pointwise. This enables validation, but makes it necessary to remove the `addPoint()` methods.
-
-===== MultiMatchQueryBuilder
-
-The `MultiMatchQueryBuilder.ZeroTermsQuery` enum has been moved to `MatchQuery.ZeroTermsQuery`.
-The new `Operator` enum is reused as well.
-
-Removed the ability to pass a boost value with the `field(String field)` method in the
-form `field^2`. Use the `field(String, float)` method instead.
-
-===== MissingQueryBuilder
-
-The MissingQueryBuilder, which was deprecated in 2.2.0, has been removed. As a replacement, use
-ExistsQueryBuilder inside a mustNot() clause: instead of `new MissingQueryBuilder(name)`, now use
-`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`.
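-
-For example, a query that previously used MissingQueryBuilder to match documents
-without a value for a (made-up) `user` field now looks like this:
-
-[source,java]
---------------------------------------------------
-// matches documents that have no value for the "user" field
-QueryBuilder query = QueryBuilders.boolQuery()
-    .mustNot(QueryBuilders.existsQuery("user"));
---------------------------------------------------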
-
-===== NotQueryBuilder
-
-The NotQueryBuilder, which was deprecated in 2.1.0, has been removed. As a replacement, use
-BoolQueryBuilder with a mustNot() clause: instead of `new NotQueryBuilder(filter)`, now use
-`new BoolQueryBuilder().mustNot(filter)`.
-
-===== TermsQueryBuilder
-
-Removed the setter for `termsLookup()`, making it only possible to use either a TermsLookup object or
-individual values at construction time. The individual settings for the TermsLookup (lookupIndex,
-lookupType, lookupId, lookupPath) have been moved to the separate TermsLookup class, which is configured
-via its constructor only and now holds the validation checks. Removed `TermsLookupQueryBuilder` in
-favour of `TermsQueryBuilder`.
-
-===== FunctionScoreQueryBuilder
-
-The `add` methods have been removed; all filters and functions must be provided as constructor arguments by
-creating an array of `FunctionScoreQueryBuilder.FilterFunctionBuilder` objects, containing one element
-for each filter/function pair.
-
-`scoreMode` and `boostMode` can only be provided using corresponding enum members instead
-of string values: see `FilterFunctionScoreQuery.ScoreMode` and `CombineFunction`.
-
-`CombineFunction.MULT` has been renamed to `MULTIPLY`.
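-
-A minimal sketch of the new construction style (the filter query and the weight
-value are made up for illustration):
-
-[source,java]
---------------------------------------------------
-// one FilterFunctionBuilder per filter/function pair, passed at construction time
-FunctionScoreQueryBuilder.FilterFunctionBuilder[] functions = {
-    new FunctionScoreQueryBuilder.FilterFunctionBuilder(
-        QueryBuilders.termQuery("category", "books"),
-        ScoreFunctionBuilders.weightFactorFunction(2.0f))
-};
-FunctionScoreQueryBuilder query =
-    new FunctionScoreQueryBuilder(QueryBuilders.matchAllQuery(), functions)
-        .boostMode(CombineFunction.MULTIPLY);
---------------------------------------------------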
-
-===== IdsQueryBuilder
-
-For simplicity, only one way of adding ids to the existing list (empty by default) remains: `addIds(String...)`.
-
-===== ShapeBuilders
-
-`InternalLineStringBuilder` has been removed in favour of `LineStringBuilder`, `InternalPolygonBuilder` in favour of `PolygonBuilder`, and `Ring` has been replaced with `LineStringBuilder`. Also, the abstract base classes `BaseLineStringBuilder` and `BasePolygonBuilder` have been merged with their corresponding implementations.
-
-===== RescoreBuilder
-
-`RescoreBuilder.Rescorer` was merged with `RescoreBuilder`, which is now an abstract superclass. QueryRescoreBuilder is currently its only implementation.
-
-===== PhraseSuggestionBuilder
-
-The inner `DirectCandidateGenerator` class has been moved out to its own class called `DirectCandidateGeneratorBuilder`.
-
-===== SortBuilders
-
-The `sortMode` setters in `FieldSortBuilder`, `GeoDistanceSortBuilder` and `ScriptSortBuilder` now
-accept a `SortMode` enum instead of a String constant, and the getters return the same enum type.
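-
-For example (the field name is made up):
-
-[source,java]
---------------------------------------------------
-// sortMode now takes the SortMode enum instead of a String
-FieldSortBuilder sort = SortBuilders.fieldSort("price").sortMode(SortMode.AVG);
---------------------------------------------------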
-
-===== SuggestBuilder
-
-The `setText` method has been changed to `setGlobalText` to make the intent more clear, and a `getGlobalText` method has been added.
-
-The `addSuggestion` method now requires the user-specified suggestion name that was previously passed to the constructor of each suggestion.
-
-===== SuggestionBuilder
-
-The `field` setter has been deleted. Instead, the field name needs to be specified as a constructor argument.
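-
-A small sketch combining the SuggestBuilder and SuggestionBuilder changes above
-(the suggestion name, text and field are made up):
-
-[source,java]
---------------------------------------------------
-SuggestBuilder suggest = new SuggestBuilder()
-    .setGlobalText("noble prize")                  // formerly setText
-    .addSuggestion("my-suggestion",                // the name is now passed here
-        SuggestBuilders.termSuggestion("title"));  // the field is a constructor argument
---------------------------------------------------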
-
-==== SearchSourceBuilder
-
-All methods which take an `XContentBuilder`, `BytesReference`, `Map<String, Object>` or `byte[]` have been removed in favor of providing the
-relevant builder object for that feature (e.g. `HighlightBuilder`, `AggregationBuilder`, `SuggestBuilder`). This means that all search requests
-can now be validated at call time, which results in much clearer errors.
-
-The `defaultResourceWindowSize(int)` method has been removed. The window size should be set explicitly on all `RescoreBuilder` objects.
-
-==== SearchRequestBuilder
-
-All methods which take an `XContentBuilder`, `BytesReference`, `Map<String, Object>` or `byte[]` have been removed in favor of providing the
-relevant builder object for that feature (e.g. `HighlightBuilder`, `AggregationBuilder`, `SuggestBuilder`). This means that all search requests
-can now be validated at call time, which results in much clearer errors.
-
-All highlighter methods have been removed in favor of a single `highlighter(HighlightBuilder)` method.
-
-The `setExtraSource(SearchSourceBuilder)` method has been removed.
-
-The `setTemplateSource(String)` and `setTemplateSource(BytesReference)` methods have been removed. Use `setTemplate(Template)` instead.
-
-`setRescorer(Rescorer)` and `setRescorer(Rescorer, int)` have been removed in favor of `setRescorer(RescoreBuilder)` and `setRescorer(RescoreBuilder, int)`.
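-
-A sketch of the builder-based style (the index, field and query values are made up,
-and `client` is an existing `Client` instance):
-
-[source,java]
---------------------------------------------------
-SearchResponse response = client.prepareSearch("my_index")
-    .setQuery(QueryBuilders.matchQuery("title", "elasticsearch"))
-    .highlighter(new HighlightBuilder().field("title"))
-    .setRescorer(RescoreBuilder.queryRescorer(
-        QueryBuilders.matchPhraseQuery("title", "elasticsearch")), 50)
-    .get();
---------------------------------------------------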
-
-==== SearchRequest
-
-All `source` methods have been removed in favor of a single `source(SearchSourceBuilder)` method. This means that all search requests can now be validated
-at call time, which results in much clearer errors.
-
-All `extraSource` methods have been removed.
-
-All `template` methods have been removed in favor of a new Search Template API. A new `SearchTemplateRequest` now accepts a template and
-a `SearchRequest` and must be executed using the new `SearchTemplateAction` action.
-
-==== SearchResponse
-
-Sort values for `string` fields are now returned as `java.lang.String` objects rather than `org.elasticsearch.common.text.Text`.
-
-==== AggregationBuilder
-
-All methods which take an `XContentBuilder`, `BytesReference`, `Map<String, Object>` or `byte[]` have been removed in favor of providing the
-relevant builder object (i.e. `subAggregation(AggregationBuilder)` or `subAggregation(PipelineAggregationBuilder)`). This means that all
-requests can now be validated at call time, which results in much clearer errors.
-
-==== ValidateQueryRequest
-
-`source(QuerySourceBuilder)`, `source(Map)`, `source(XContentBuilder)`, `source(String)`, `source(byte[])`, `source(byte[], int, int)`,
-`source(BytesReference)` and `source()` have been removed in favor of using `query(QueryBuilder)` and `query()`.
-
-==== ValidateQueryRequestBuilder
-
-The `setSource()` methods have been removed in favor of using `setQuery(QueryBuilder)`.
-
-==== ExplainRequest
-
-`source(QuerySourceBuilder)`, `source(Map)`, `source(BytesReference)` and `source()` have been removed in favor of using
-`query(QueryBuilder)` and `query()`.
-
-==== ExplainRequestBuilder
-
-The `setQuery(BytesReference)` method has been removed in favor of using `setQuery(QueryBuilder)`.
-
-==== ClusterStatsResponse
-
-Removed the `getMemoryAvailable` method from `OsStats`; it could previously be accessed by calling
-`clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`.
-
-==== setRefresh(boolean) has been removed
-
-`setRefresh(boolean)` has been removed in favor of `setRefreshPolicy(RefreshPolicy)` because there
-are now three options (`NONE`, `IMMEDIATE`, and `WAIT_UNTIL`). `setRefreshPolicy(IMMEDIATE)` has the same
-behavior as `setRefresh(true)` used to have. See `setRefreshPolicy`'s javadoc for more.
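-
-For example (the index, type and document source are made up):
-
-[source,java]
---------------------------------------------------
-client.prepareIndex("my_index", "my_type", "1")
-    .setSource("user", "kimchy")
-    .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) // like setRefresh(true) before
-    .get();
---------------------------------------------------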
-
-==== Remove properties support
-
-Some Java APIs (e.g., `IndicesAdminClient#setSettings`) would support Java properties syntax
-(line-delimited key=value pairs). This support has been removed.
-
-==== Render Search Template Java API has been removed
-
-The Render Search Template Java API, including `RenderSearchTemplateAction`, `RenderSearchTemplateRequest` and
-`RenderSearchTemplateResponse`, has been removed in favor of a new `simulate` option in the Search Template Java API.
-The Search Template API is now included in the `lang-mustache` module and the `simulate` flag must be set on the
-`SearchTemplateRequest` object.
-
-==== AnalyzeRequest
-
-The `tokenFilters(String...)` and `charFilters(String...)` methods have been removed in favor of using
-`addTokenFilter(String)`/`addTokenFilter(Map)` and `addCharFilter(String)`/`addCharFilter(Map)` to add each filter.
-
-==== AnalyzeRequestBuilder
-
-The `setTokenFilters(String...)` and `setCharFilters(String...)` methods have been removed in favor of using
-`addTokenFilter(String)`/`addTokenFilter(Map)` and `addCharFilter(String)`/`addCharFilter(Map)` to add each filter.
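-
-A sketch of the new additive style (the index name and filter choices are made up):
-
-[source,java]
---------------------------------------------------
-AnalyzeRequest request = new AnalyzeRequest("my_index")
-    .text("Some Text To Analyze")
-    .addTokenFilter("lowercase")                                // referenced by name
-    .addTokenFilter(Collections.singletonMap("type", "stop"));  // or as a custom definition
---------------------------------------------------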
-
-==== BlobContainer Interface for Snapshot/Restore
-
-Some methods have been removed from the `BlobContainer` interface for Snapshot/Restore repositories. In particular,
-the following three methods have been removed:
-
- 1. `deleteBlobs(Collection<String>)` (use `deleteBlob(String)` instead)
- 2. `deleteBlobsByPrefix(String)` (use `deleteBlob(String)` instead)
- 3. `writeBlob(String, BytesReference)` (use `writeBlob(String, InputStream, long)` instead)
-
-The `deleteBlob` methods that took multiple blobs as arguments were deleted because no atomic guarantees can be made about either deleting all blobs or deleting none of them, and exception handling in such a situation is ambiguous and best left to the caller. Hence, all delete blob calls use the singular `deleteBlob(String)` method.
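-
-Callers that previously deleted multiple blobs in one call can loop over the singular
-method and decide for themselves how to handle per-blob failures, for instance
-(a sketch, where `blobNames` is any collection of blob names):
-
-[source,java]
---------------------------------------------------
-for (String blobName : blobNames) {
-    blobContainer.deleteBlob(blobName); // may throw IOException for an individual blob
-}
---------------------------------------------------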
-
-The extra `writeBlob` method offered no real advantage to the interface and all calls to `writeBlob(blobName, bytesRef)` can be replaced with:
-
-[source,java]
------
-try (InputStream stream = bytesRef.streamInput()) {
- blobContainer.writeBlob(blobName, stream, bytesRef.length());
-}
------
-
-For any custom implementation of the `BlobContainer` interface, these three methods must be removed.
-
diff --git a/docs/reference/migration/migrate_5_0/mapping.asciidoc b/docs/reference/migration/migrate_5_0/mapping.asciidoc
deleted file mode 100644
index dd467abe9e..0000000000
--- a/docs/reference/migration/migrate_5_0/mapping.asciidoc
+++ /dev/null
@@ -1,264 +0,0 @@
-[[breaking_50_mapping_changes]]
-=== Mapping changes
-
-==== `string` fields replaced by `text`/`keyword` fields
-
-The `string` field datatype has been replaced by the `text` field for full
-text analyzed content, and the `keyword` field for not-analyzed exact string
-values. For backwards compatibility purposes, during the 5.x series:
-
-* `string` fields on pre-5.0 indices will function as before.
-* New `string` fields can be added to pre-5.0 indices as before.
-* `text` and `keyword` fields can also be added to pre-5.0 indices.
-* When adding a `string` field to a new index, the field mapping will be
- rewritten as a `text` or `keyword` field if possible, otherwise
- an exception will be thrown. Certain configurations that were possible
- with `string` fields are no longer possible with `text`/`keyword` fields
- such as enabling `term_vectors` on a not-analyzed `keyword` field.
-
-==== Default string mappings
-
-String mappings now have the following default mappings:
-
-[source,js]
----------------
-{
- "type": "text",
- "fields": {
- "keyword": {
- "type": "keyword",
- "ignore_above": 256
- }
- }
-}
----------------
-
-This makes it possible to perform full-text search on the original field and to sort
-and run aggregations on the `keyword` sub-field.
-
-==== Numeric fields
-
-Numeric fields are now indexed with a completely different data structure, called a
-BKD tree, which is expected to require less disk space and be faster for range
-queries than the previous way numerics were indexed.
-
-Term queries will return constant scores now, while they used to return higher
-scores for rare terms due to the contribution of the document frequency, which
-this new BKD structure does not record. If scoring is needed, then it is advised
-to map the numeric fields as <<keyword,`keyword`s>> too.
-
-Note that this <<keyword,`keyword`>> mapping does not need to replace the numeric
-mapping. For instance, if you need both sorting and scoring on your numeric field,
-you could map it both as a number and a `keyword` using <<multi-fields>>:
-
-[source,js]
---------------------------------------------------
-PUT my_index
-{
- "mappings": {
- "my_type": {
- "properties": {
- "my_number": {
- "type": "long",
- "fields": {
- "keyword": {
- "type": "keyword"
- }
- }
- }
- }
- }
- }
-}
---------------------------------------------------
-// CONSOLE
-
-Also the `precision_step` parameter is now irrelevant and will be rejected on
-indices that are created on or after 5.0.
-
-==== `_timestamp` and `_ttl`
-
-The `_timestamp` and `_ttl` fields were deprecated and are now removed. As a
-replacement for `_timestamp`, you should populate a regular date field with the
-current timestamp on the application side. For `_ttl`, you should either use
-time-based indices when applicable, or cron a delete-by-query with a range
-query on a timestamp field.
-
-==== `index` property
-
-On all field datatypes (except for the deprecated `string` field), the `index`
-property now only accepts `true`/`false` instead of `not_analyzed`/`no`. The
-`string` field still accepts `analyzed`/`not_analyzed`/`no`.
-
-==== Doc values on unindexed fields
-
-Previously, setting a field to `index:no` would also disable doc-values. Now,
-doc-values are always enabled on numeric and boolean fields unless
-`doc_values` is set to `false`.
-
-==== Floating points use `float` instead of `double`
-
-When dynamically mapping a field containing a floating point number, the field
-now defaults to using `float` instead of `double`. The reasoning is that
-floats should be more than enough for most cases while decreasing storage
-requirements significantly.
-
-==== `norms`
-
-`norms` now takes a boolean instead of an object. This boolean is the replacement
-for `norms.enabled`. There is no replacement for `norms.loading` since eager
-loading of norms is not useful anymore now that norms are disk-based.
-
-==== `fielddata.format`
-
-Setting `fielddata.format: doc_values` in the mappings used to implicitly
-enable doc-values on a field. This no longer works: the only way to enable or
-disable doc-values is by using the `doc_values` property of mappings.
-
-==== `fielddata.filter.regex`
-
-Regex filters are not supported anymore and will be dropped on upgrade.
-
-==== Source-transform removed
-
-The source `transform` feature has been removed. Instead, use an ingest pipeline.
-
-==== `_parent` field no longer indexed
-
-The join between parent and child documents no longer relies on indexed fields
-and therefore from 5.0.0 onwards the `_parent` field is no longer indexed. In
-order to find documents that refer to a specific parent id, the new
-`parent_id` query can be used. The GET response and hits inside the search
-response still include the parent id under the `_parent` key.
-
-==== Source `format` option
-
-The `_source` mapping no longer supports the `format` option. It will still be
-accepted for indices created before the upgrade to 5.0 for backwards
-compatibility, but it will have no effect. Indices created on or after 5.0
-will reject this option.
-
-==== Object notation
-
-Core types no longer support the object notation, which was used to provide
-per document boosts as follows:
-
-[source,js]
----------------
-{
- "value": "field_value",
- "boost": 42
-}
----------------
-
-==== Boost accuracy for queries on `_all`
-
-Per-field boosts on the `_all` field are now compressed into a single byte instead
-of the 4 bytes used previously. While this will make the index much more
-space-efficient, it also means that index time boosts will be less accurately
-encoded.
-
-==== `_ttl` and `_timestamp` cannot be created
-
-You can no longer create indexes with `_ttl` or `_timestamp` enabled. Indexes
-created before 5.0 with them enabled will continue to work.
-
-You should replace `_timestamp` in new indexes by adding a field to your source
-either in the application producing the data or with an ingest pipeline like
-this one:
-
-[source,js]
----------------
-PUT _ingest/pipeline/timestamp
-{
- "description" : "Adds a timestamp field at the current time",
- "processors" : [ {
- "set" : {
- "field": "timestamp",
- "value": "{{_ingest.timestamp}}"
- }
- } ]
-}
-
-PUT newindex/type/1?pipeline=timestamp
-{
- "example": "data"
-}
-
-GET newindex/type/1
----------------
-// CONSOLE
-
-This produces:
-[source,js]
----------------
-{
- "_source": {
- "example": "data",
- "timestamp": "2016-06-21T18:48:55.560+0000"
- },
- ...
-}
----------------
-// TESTRESPONSE[s/\.\.\./"found": true, "_id": "1", "_index": "newindex", "_type": "type", "_version": 1/]
-// TESTRESPONSE[s/"2016-06-21T18:48:55.560\+0000"/"$body._source.timestamp"/]
-
-If you have an old index created with 2.x that has `_timestamp` enabled then
-you can migrate it to a new index with a `timestamp` field in the source
-using reindex:
-
-[source,js]
----------------
-POST _reindex
-{
- "source": {
- "index": "oldindex"
- },
- "dest": {
- "index": "newindex"
- },
- "script": {
- "lang": "painless",
- "inline": "ctx._source.timestamp = ctx._timestamp; ctx._timestamp = null"
- }
-}
----------------
-// CONSOLE
-// TEST[s/^/PUT oldindex\n/]
-
-You can replace `_ttl` with time-based index names (preferred) or by adding a
-cron job which runs a delete-by-query on a timestamp field in the source
-document. If you had documents like this:
-
-[source,js]
----------------
-POST index/type/_bulk
-{"index":{"_id":1}}
-{"example": "data", "timestamp": "2016-06-21T18:48:55.560+0000" }
-{"index":{"_id":2}}
-{"example": "data", "timestamp": "2016-04-21T18:48:55.560+0000" }
----------------
-// CONSOLE
-
-Then you could delete all of the documents from before May 1st with:
-
-[source,js]
----------------
-POST index/type/_delete_by_query
-{
- "query": {
- "range" : {
- "timestamp" : {
- "lt" : "2016-05-01"
- }
- }
- }
-}
----------------
-// CONSOLE
-// TEST[continued]
-
-IMPORTANT: Keep in mind that deleting documents from an index is very expensive
-compared to deleting whole indexes. That is why time-based indexes are
-recommended over this sort of thing and why `_ttl` was deprecated in the first
-place.
diff --git a/docs/reference/migration/migrate_5_0/packaging.asciidoc b/docs/reference/migration/migrate_5_0/packaging.asciidoc
deleted file mode 100644
index 74faf3bb7d..0000000000
--- a/docs/reference/migration/migrate_5_0/packaging.asciidoc
+++ /dev/null
@@ -1,65 +0,0 @@
-[[breaking_50_packaging]]
-=== Packaging
-
-==== Default logging using systemd (since Elasticsearch 2.2.0)
-
-In previous versions of Elasticsearch, the default logging
-configuration routed standard output to /dev/null and standard error to
-the journal. However, there are often critical error messages at
-startup that are logged to standard output rather than standard error
-and these error messages would be lost to the nether. The default has
-changed to now route standard output to the journal and standard error
-to inherit this setting (these are the defaults for systemd). These
-settings can be modified by editing the elasticsearch.service file.
-
-==== Longer startup times
-
-In Elasticsearch 5.0.0 the `-XX:+AlwaysPreTouch` flag has been added to the JVM
-startup options. This option touches all memory pages used by the JVM heap
-during initialization of the HotSpot VM to reduce the chance of having to commit
-a memory page during GC time. This will increase the startup time of
-Elasticsearch as well as increasing the initial resident memory usage of the
-Java process.
-
-==== JVM options
-
-Arguments to the Java Virtual Machine have been centralized and moved
-to a new configuration file, `jvm.options`. This centralization allows for
-simpler end-user management of JVM options.
-
-This migration removes all previous mechanisms of setting JVM options
-via the environment variables `ES_MIN_MEM`, `ES_MAX_MEM`,
-`ES_HEAP_SIZE`, `ES_HEAP_NEWSIZE`, `ES_DIRECT_SIZE`, `ES_USE_IPV4`,
-`ES_GC_OPTS`, `ES_GC_LOG_FILE`, and `JAVA_OPTS`.
-
-The default location for this file is in config/jvm.options if installing
-from the tar or zip distributions, and /etc/elasticsearch/jvm.options if installing
-from the Debian or RPM packages. You can specify an alternative location by setting
-the environment variable `ES_JVM_OPTIONS` to the path to the file.
-
-==== /bin/bash is now required
-
-Previously, the scripts used to start Elasticsearch and run plugin
-commands only required a Bourne-compatible shell. Starting in
-Elasticsearch 5.0.0, the bash shell is now required and `/bin/bash` is a
-hard-dependency for the RPM and Debian packages.
-
-==== Environmental Settings
-
-Previously, Elasticsearch could be configured via environment variables
-in two ways: first by using the placeholder syntax
-`${env.ENV_VAR_NAME}` and second by using the same syntax without
-the `env` prefix: `${ENV_VAR_NAME}`. The first method has been removed
-from Elasticsearch.
-
-Additionally, it was previously possible to set any setting in
-Elasticsearch via JVM system properties. This has been removed from
-Elasticsearch.
-
-==== Dying on fatal errors
-
-Previous versions of Elasticsearch would not halt the JVM if out of memory errors or other fatal
-errors were encountered during the life of the Elasticsearch instance. Because such errors leave
-the JVM in a questionable state, the best course of action is to halt the JVM when this occurs.
-Starting in Elasticsearch 5.x, this is now the case. Operators should consider configuring their
-Elasticsearch services so that they respawn automatically in the case of such a fatal crash.
diff --git a/docs/reference/migration/migrate_5_0/percolator.asciidoc b/docs/reference/migration/migrate_5_0/percolator.asciidoc
deleted file mode 100644
index f173a0df95..0000000000
--- a/docs/reference/migration/migrate_5_0/percolator.asciidoc
+++ /dev/null
@@ -1,111 +0,0 @@
-[[breaking_50_percolator]]
-=== Percolator changes
-
-==== Percolator is near-real time
-
-Previously percolators were activated in real-time, i.e. as soon as they were
-indexed. Now, changes to the `percolate` query are visible in near-real time,
-as soon as the index has been refreshed. This change was required because, in
-indices created from 5.0 onwards, the terms used in a percolator query are
-automatically indexed to allow for more efficient query selection during
-percolation.
-
-==== Percolate and multi percolator APIs
-
-Percolator and multi percolate APIs have been deprecated and will be removed in the next major release. These APIs have
-been replaced by the `percolate` query that can be used in the search and multi search APIs.
-
-==== Percolator field mapping
-
-The `.percolator` type can no longer be used to index percolator queries.
-
-Instead a <<percolator,percolator field type>> must be configured prior to indexing percolator queries.
-
-Indices with a `.percolator` type created on a version before 5.0.0 can still be used,
-but new indices no longer accept the `.percolator` type.
-
-==== Percolate document mapping
-
-The `percolate` query no longer modifies the mappings. Previously, the percolate API
-could be used to dynamically introduce new fields to the mappings based on the
-fields in the document being percolated. This no longer works, because these
-unmapped fields are not persisted in the mapping.
-
-==== Percolator documents returned by search
-
-Documents with the `.percolate` type were previously excluded from the search
-response, unless the `.percolate` type was specified explicitly in the search
-request. Now, percolator documents are treated in the same way as any other
-document and are returned by search requests.
-
-==== Percolating existing document
-
-When percolating an existing document, it is no longer allowed to also specify a document
-as source in the `percolate` query. Previously the percolate API allowed this and ignored
-the existing document.
-
-==== Percolate Stats
-
-The percolate stats have been removed. This is because the percolator no longer caches the percolator queries.
-
-==== Percolator queries containing range queries with `now` ranges
-
-The percolator no longer accepts percolator queries containing `range` queries with ranges that are based on current
-time (using `now`).
-
-==== Java client
-
-The percolator is no longer part of the core elasticsearch dependency; it has moved to the percolator module.
-Therefore, when using the percolator feature from the Java client, the new percolator module should also be on the
-classpath. Also, the transport client should load the percolator module as a plugin:
-
-[source,java]
---------------------------------------------------
-TransportClient transportClient = TransportClient.builder()
- .settings(Settings.builder().put("node.name", "node"))
- .addPlugin(PercolatorPlugin.class)
- .build();
-transportClient.addTransportAddress(
- new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))
-);
---------------------------------------------------
-
-The percolator and multi percolate related methods from the `Client` interface have been removed. These APIs have been
-deprecated and it is recommended to use the `percolate` query in either the search or multi search APIs. However the
-percolate and multi percolate APIs can still be used from the Java client.
-
-Using percolate request:
-
-[source,java]
---------------------------------------------------
-PercolateRequest request = new PercolateRequest();
-// set stuff and then execute:
-PercolateResponse response = transportClient.execute(PercolateAction.INSTANCE, request).actionGet();
---------------------------------------------------
-
-Using percolate request builder:
-
-[source,java]
---------------------------------------------------
-PercolateRequestBuilder builder = new PercolateRequestBuilder(transportClient, PercolateAction.INSTANCE);
-// set stuff and then execute:
-PercolateResponse response = builder.get();
---------------------------------------------------
-
-Using multi percolate request:
-
-[source,java]
---------------------------------------------------
-MultiPercolateRequest request = new MultiPercolateRequest();
-// set stuff and then execute:
-MultiPercolateResponse response = transportClient.execute(MultiPercolateAction.INSTANCE, request).get();
---------------------------------------------------
-
-Using multi percolate request builder:
-
-[source,java]
---------------------------------------------------
-MultiPercolateRequestBuilder builder = new MultiPercolateRequestBuilder(transportClient, MultiPercolateAction.INSTANCE);
-// set stuff and then execute:
-MultiPercolateResponse response = builder.get();
--------------------------------------------------- \ No newline at end of file
diff --git a/docs/reference/migration/migrate_5_0/plugins.asciidoc b/docs/reference/migration/migrate_5_0/plugins.asciidoc
deleted file mode 100644
index 82c0e4f6b9..0000000000
--- a/docs/reference/migration/migrate_5_0/plugins.asciidoc
+++ /dev/null
@@ -1,161 +0,0 @@
-[[breaking_50_plugins]]
-=== Plugin changes
-
-The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`. The
-structure of the plugin ZIP archive has changed. All the plugin files must be
-contained in a top-level directory called `elasticsearch`. If you use the
-gradle build, this structure is automatically generated.
-
-==== Plugins isolation
-
-The `isolated` option has been removed. Each plugin now has its own classloader.
-
-==== Site plugins removed
-
-Site plugins have been removed. Site plugins should be reimplemented as Kibana
-plugins.
-
-==== Multicast plugin removed
-
-Multicast has been removed. Use unicast discovery, or one of the cloud
-discovery plugins.
-
-==== Plugins with custom query implementations
-
-Plugins implementing custom queries need to implement the `fromXContent(QueryParseContext)` method in their
-`QueryParser` subclass rather than `parse`. This method takes care of parsing the query from `XContent` format
-into an intermediate query representation that can be streamed between the nodes in binary format, effectively the
-query object used in the Java API. The query parser also needs to implement the `getBuilderPrototype` method, which
-returns a prototype of the `NamedWriteable` query; this allows an incoming query to be deserialized by calling
-`readFrom(StreamInput)` against it, which creates a new object (see usages of `Writeable`). The `QueryParser`
-also needs to declare the generic type of the query that it supports and is able to parse.
-The query object can then transform itself into a Lucene query through the new `toQuery(QueryShardContext)` method,
-which returns a Lucene query to be executed on the data node.
-
-Similarly, plugins implementing custom score functions need to implement the `fromXContent(QueryParseContext)`
-method in their `ScoreFunctionParser` subclass rather than `parse`. This method takes care of parsing
-the function from `XContent` format into an intermediate function representation that can be streamed between
-the nodes in binary format, effectively the function object used in the Java API. The parser also needs
-to implement the `getBuilderPrototype` method, which returns a prototype of the `NamedWriteable` function; this
-allows an incoming function to be deserialized by calling `readFrom(StreamInput)` against it, which creates a
-new object (see usages of `Writeable`). The `ScoreFunctionParser` also needs to declare the generic type of the
-function that it supports and is able to parse. The function object can then transform itself into a Lucene
-function through the new `toFunction(QueryShardContext)` method, which returns a Lucene function to be executed
-on the data node.
-
-==== Cloud AWS plugin changes
-
-The Cloud AWS plugin has been split into two plugins:
-
-* {plugins}/discovery-ec2.html[Discovery EC2 plugin]
-* {plugins}/repository-s3.html[Repository S3 plugin]
-
-Proxy settings for both plugins have been renamed:
-
-* from `cloud.aws.proxy_host` to `cloud.aws.proxy.host`
-* from `cloud.aws.ec2.proxy_host` to `cloud.aws.ec2.proxy.host`
-* from `cloud.aws.s3.proxy_host` to `cloud.aws.s3.proxy.host`
-* from `cloud.aws.proxy_port` to `cloud.aws.proxy.port`
-* from `cloud.aws.ec2.proxy_port` to `cloud.aws.ec2.proxy.port`
-* from `cloud.aws.s3.proxy_port` to `cloud.aws.s3.proxy.port`
-
-==== Cloud Azure plugin changes
-
-The Cloud Azure plugin has been split into three plugins:
-
-* {plugins}/discovery-azure-classic.html[Discovery Azure plugin]
-* {plugins}/repository-azure.html[Repository Azure plugin]
-* {plugins}/store-smb.html[Store SMB plugin]
-
-If you were using the `cloud-azure` plugin for snapshot and restore, you had in `elasticsearch.yml`:
-
-[source,yaml]
------
-cloud:
- azure:
- storage:
- account: your_azure_storage_account
- key: your_azure_storage_key
------
-
-You now need to give a unique id to the storage details, as you can define multiple storage accounts:
-
-[source,yaml]
------
-cloud:
- azure:
- storage:
- my_account:
- account: your_azure_storage_account
- key: your_azure_storage_key
------
-
-
-==== Cloud GCE plugin changes
-
-The Cloud GCE plugin has been renamed to {plugins}/discovery-gce.html[Discovery GCE plugin].
-
-==== Delete-By-Query plugin removed
-
-The Delete-By-Query plugin has been removed in favor of a new <<docs-delete-by-query,Delete By Query API>>
-implementation in core. It now supports throttling, retries and cancellation but no longer supports timeouts.
-Instead use the <<docs-delete-by-query-cancel-task-api,cancel API>> to cancel deletes that run too long.
-
-==== Mapper Attachments plugin deprecated
-
-The mapper attachments plugin has been deprecated. Users should now use the {plugins}/ingest-attachment.html[`ingest-attachment`]
-plugin.
-
-==== Passing of Java System Properties
-
-Previously, Java system properties could be passed to the plugin
-command by passing `-D` style arguments directly to the plugin script.
-This is no longer permitted and such system properties must be passed
-via `ES_JAVA_OPTS`.
-
-==== Custom plugins path
-
-The ability to specify a custom plugins path via `path.plugins` has
-been removed.
-
-==== ScriptPlugin
-
-Plugins that register custom scripts should implement `ScriptPlugin` and remove
-their `onModule(ScriptModule)` implementation.
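-
-A minimal sketch, assuming a hypothetical `MyScriptEngineService` that implements
-`ScriptEngineService`:
-
-[source,java]
---------------------------------------------------
-public class MyScriptPlugin extends Plugin implements ScriptPlugin {
-    @Override
-    public ScriptEngineService getScriptEngineService(Settings settings) {
-        return new MyScriptEngineService(settings); // hypothetical engine
-    }
-}
---------------------------------------------------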
-
-==== AnalysisPlugin
-
-Plugins that register custom analysis components should implement
-`AnalysisPlugin` and remove their `onModule(AnalysisModule)` implementation.
-
-==== MapperPlugin
-
-Plugins that register custom mappers should implement
-`MapperPlugin` and remove their `onModule(IndicesModule)` implementation.
-
-==== ActionPlugin
-
-Plugins that register custom actions should implement `ActionPlugin` and
-remove their `onModule(ActionModule)` implementation.
-
-Plugins that register custom `RestHandler`s should implement `ActionPlugin` and
-remove their `onModule(NetworkModule)` implementation.
-
-==== SearchPlugin
-
-Plugins that register custom search time behavior (`Query`, `Suggester`,
-`ScoreFunction`, `FetchSubPhase`, `Highlighter`, etc) should implement
-`SearchPlugin` and remove their `onModule(SearchModule)` implementation.
-
-==== Testing Custom Plugins
-
-`ESIntegTestCase#pluginList` has been removed. Use `Arrays.asList` instead. It
-isn't needed now that all plugins require Java 1.8.
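-
-For example, in a test case (the plugin class name is made up):
-
-[source,java]
---------------------------------------------------
-@Override
-protected Collection<Class<? extends Plugin>> nodePlugins() {
-    return Arrays.asList(MyPlugin.class);
-}
---------------------------------------------------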
-
-==== Mapper-Size plugin
-
-The metadata field `_size` is not accessible in aggregations, scripts and when
-sorting for indices created in 2.x even if the index has been upgraded using the <<indices-upgrade,`upgrade`>> API.
-If these features are needed in your application it is required to reindex the data with Elasticsearch 5.x.
-The easiest way to reindex old indices is to use the `reindex` API, or the reindex UI provided by
-the <<migration-plugin,Migration Plugin>>.
diff --git a/docs/reference/migration/migrate_5_0/rest.asciidoc b/docs/reference/migration/migrate_5_0/rest.asciidoc
deleted file mode 100644
index 36f4071ce1..0000000000
--- a/docs/reference/migration/migrate_5_0/rest.asciidoc
+++ /dev/null
@@ -1,86 +0,0 @@
-
-[[breaking_50_rest_api_changes]]
-=== REST API changes
-
-==== id values longer than 512 bytes are rejected
-
-When specifying an `_id` value longer than 512 bytes, the request will be
-rejected.
-
-==== `/_optimize` endpoint removed
-
-The deprecated `/_optimize` endpoint has been removed. The `/_forcemerge`
-endpoint should be used in lieu of optimize.
-
-The `GET` HTTP verb for `/_forcemerge` is no longer supported, please use the
-`POST` HTTP verb.
-
-==== Index creation endpoint only accepts `PUT`
-
-It used to be possible to create an index by either calling `PUT index_name`
-or `POST index_name`. Only the former is now supported.
-
-==== Removed `mem` section from `/_cluster/stats` response
-
-The `mem` section contained only one value, the total memory available
-throughout all nodes in the cluster. The section was removed as it didn't
-prove useful.
-
-==== Revised node roles aggregate returned by `/_cluster/stats`
-
-The `client`, `master_only`, `data_only` and `master_data` fields have been
-removed in favor of `master`, `data`, `ingest` and `coordinating_only`. A
-node can contribute to multiple counts as it can have multiple roles. Every
-node is implicitly a coordinating node, so whenever a node has no explicit
-roles, it will be counted as coordinating only.
-
-==== Removed shard `version` information from `/_cluster/state` routing table
-
-We now store the allocation ids of shards in the cluster state and use them to
-select primary shards instead of the version information.
-
-==== Node roles are not part of node attributes anymore
-
-Node roles are now returned in a specific section, called `roles`, as part of
-the nodes stats and nodes info responses. The new section is an array that holds all
-the different roles that each node fulfills. If the array is returned empty,
-the node is a coordinating-only node.
-
-==== Forbid unquoted JSON
-
-Previously, JSON documents were allowed with unquoted field names, which isn't
-strictly JSON and broke some Elasticsearch clients. If documents were already
-indexed with unquoted fields in a previous version of Elasticsearch, some
-operations may throw errors. To accompany this, a commented out JVM option has
-been added to the `jvm.options` file:
-`-Delasticsearch.json.allow_unquoted_field_names`.
-
-Note that this option is provided solely for migration purposes and will be
-removed in Elasticsearch 6.0.0.
-
-==== Analyze API changes
-
-The deprecated `filters`/`token_filters`/`char_filters` parameter has been
-renamed `filter`/`token_filter`/`char_filter`.
-
-==== `DELETE /_query` endpoint removed
-
-The `DELETE /_query` endpoint provided by the Delete-By-Query plugin has been
-removed and replaced by the <<docs-delete-by-query,Delete By Query API>>.
-
-==== Create stored script endpoint removed
-
-The `PUT /_scripts/{lang}/{id}/_create` endpoint that previously allowed creating
-indexed scripts has been removed. Indexed scripts have been replaced by
-<<modules-scripting-stored-scripts,stored scripts>>.
-
-==== Create stored template endpoint removed
-
-The `PUT /_search/template/{id}/_create` endpoint that previously allowed creating
-indexed templates has been removed. Indexed templates have been replaced by
-<<pre-registered-templates, Pre-registered templates>>.
-
-==== Remove properties support
-
-Some REST endpoints (e.g., cluster update index settings) supported detecting content in the Java
-properties format (line-delimited key=value pairs). This support has been removed.
diff --git a/docs/reference/migration/migrate_5_0/scripting.asciidoc b/docs/reference/migration/migrate_5_0/scripting.asciidoc
deleted file mode 100644
index 52a26c89d1..0000000000
--- a/docs/reference/migration/migrate_5_0/scripting.asciidoc
+++ /dev/null
@@ -1,341 +0,0 @@
-[[breaking_50_scripting]]
-=== Script related changes
-
-==== Removed 1.x script and template syntax
-
-The deprecated 1.x syntax of defining inline scripts / templates and referring to file or index-based
-scripts / templates has been removed.
-
-The `script` and `params` string parameters can no longer be used; instead the `script` object syntax must be used.
-This applies to the update api, script sort, `script_score` function, `script` query, `scripted_metric` aggregation and
-`script_heuristic` aggregation.
-
-So this usage of inline scripts is no longer allowed:
-
-[source,js]
------------------------------------
-{
- "script_score": {
- "lang": "groovy",
- "script": "Math.log(_score * 2) + my_modifier",
- "params": {
- "my_modifier": 8
- }
- }
-}
------------------------------------
-
-and instead this syntax must be used:
-
-[source,js]
------------------------------------
-{
- "script_score": {
- "script": {
- "lang": "groovy",
- "inline": "Math.log(_score * 2) + my_modifier",
- "params": {
- "my_modifier": 8
- }
- }
- }
-}
------------------------------------
-
-The `script` or `script_file` parameter can no longer be used to refer to file-based scripts and templates; instead
-`file` must be used.
-
-This usage of referring to file based scripts is no longer valid:
-
-[source,js]
------------------------------------
-{
- "script_score": {
- "script": "calculate-score",
- "params": {
- "my_modifier": 8
- }
- }
-}
------------------------------------
-
-This usage is valid:
-
-[source,js]
------------------------------------
-{
- "script_score": {
- "script": {
- "lang": "groovy",
- "file": "calculate-score",
- "params": {
- "my_modifier": 8
- }
- }
- }
-}
------------------------------------
-
-The `script_id` parameter can no longer be used to refer to indexed scripts and templates; instead `id` must
-be used.
-
-This usage of referring to indexed scripts is no longer valid:
-
-[source,js]
------------------------------------
-{
- "script_score": {
- "script_id": "indexedCalculateScore",
- "params": {
- "my_modifier": 8
- }
- }
-}
------------------------------------
-
-This usage is valid:
-
-[source,js]
------------------------------------
-{
- "script_score": {
- "script": {
- "id": "indexedCalculateScore",
- "lang" : "groovy",
- "params": {
- "my_modifier": 8
- }
- }
- }
-}
------------------------------------
-
-==== Template query
-
-The `query` field in the `template` query can no longer be used.
-This 1.x syntax can no longer be used:
-
-[source,js]
------------------------------------
-{
- "query": {
- "template": {
- "query": {"match_{{template}}": {}},
- "params" : {
- "template" : "all"
- }
- }
- }
-}
------------------------------------
-
-and instead the following syntax should be used:
-
-[source,js]
------------------------------------
-{
- "query": {
- "template": {
- "inline": {"match_{{template}}": {}},
- "params" : {
- "template" : "all"
- }
- }
- }
-}
------------------------------------
-
-==== Search templates
-
-The top level `template` field in the search template api has been replaced with consistent template / script object
-syntax. This 1.x syntax can no longer be used:
-
-[source,js]
------------------------------------
-{
- "template" : {
- "query": { "match" : { "{{my_field}}" : "{{my_value}}" } },
- "size" : "{{my_size}}"
- },
- "params" : {
- "my_field" : "foo",
- "my_value" : "bar",
- "my_size" : 5
- }
-}
------------------------------------
-
-and instead the following syntax should be used:
-
-[source,js]
------------------------------------
-{
- "inline" : {
- "query": { "match" : { "{{my_field}}" : "{{my_value}}" } },
- "size" : "{{my_size}}"
- },
- "params" : {
- "my_field" : "foo",
- "my_value" : "bar",
- "my_size" : 5
- }
-}
------------------------------------
-
-==== Indexed scripts and templates
-
-Indexed scripts and templates have been replaced by <<modules-scripting-stored-scripts,stored scripts>>,
-which store the scripts and templates in the cluster state instead of a dedicated `.scripts` index.
-
-There is a soft limit of 65535 bytes on the size of stored scripts. If scripts exceed that size,
-the `script.max_size_in_bytes` setting can be added to elasticsearch.yml to raise the soft limit.
-If scripts are really large, other options like native scripts should be considered.
-
-Previously indexed scripts in the `.scripts` index will no longer be used, as
-Elasticsearch will now try to fetch scripts from the cluster state. Upon upgrading
-to 5.x the `.scripts` index will continue to exist, so it can be used by a script to migrate
-the stored scripts from the `.scripts` index into the cluster state. The current format of the scripts
-and templates hasn't been changed; only the 1.x format has been removed.
-
-===== Python migration script
-
-The following Python script can be used to import your indexed scripts into the cluster state
-as stored scripts:
-
-[source,python]
------------------------------------
-from elasticsearch import Elasticsearch,helpers
-
-es = Elasticsearch([
- {'host': 'localhost'}
-])
-
-for doc in helpers.scan(es, index=".scripts", preserve_order=True):
- es.put_script(lang=doc['_type'], id=doc['_id'], body=doc['_source'])
------------------------------------
-
-This script makes use of the official Elasticsearch Python client and
-therefore you need to make sure that you have installed the client in your
-environment. For more information on this please see
-https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/index.html[`elasticsearch-py`].
-
-===== Perl migration script
-
-The following Perl script can be used to import your indexed scripts into the cluster state
-as stored scripts:
-
-[source,perl]
------------------------------------
-use Search::Elasticsearch;
-
-my $es = Search::Elasticsearch->new( nodes => 'localhost:9200');
-my $scroll = $es->scroll_helper( index => '.scripts', sort => '_doc');
-
-while (my $doc = $scroll->next) {
- $es->put_script(
- lang => $doc->{_type},
- id => $doc->{_id},
- body => $doc->{_source}
- );
-}
------------------------------------
-
-This script makes use of the official Elasticsearch Perl client and
-therefore you need to make sure that you have installed the client in your
-environment. For more information on this please see
-https://metacpan.org/pod/Search::Elasticsearch[`Search::Elasticsearch`].
-
-===== Verifying script migration
-
-After you have moved the scripts via the provided scripts or otherwise, you can verify with the following
-request whether the migration has happened successfully:
-
-[source,js]
------------------------------------
-GET _cluster/state?filter_path=metadata.stored_scripts
------------------------------------
-
-The response should include all your scripts from the `.scripts` index.
-After you have verified that all your scripts have been moved, optionally as a last step,
-you can delete the `.scripts` index as Elasticsearch no longer uses it.
-
-==== Indexed scripts Java APIs
-
-All the methods related to interacting with indexed scripts have been removed.
-The Java API methods for interacting with stored scripts have been added under the `ClusterAdminClient` class.
-The sugar methods that used to exist on the indexed scripts API do not exist on the methods for
-stored scripts. The only way to provide scripts is by using a `BytesReference` implementation; if a string needs to be
-provided, the `BytesArray` class should be used.
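-
-A sketch of storing a script through the new API (the language, id and script body
-are made up, and the exact builder method names here are an assumption):
-
-[source,java]
---------------------------------------------------
-client.admin().cluster().preparePutStoredScript()
-    .setScriptLang("painless")
-    .setId("my-script")
-    .setSource(new BytesArray("{\"script\": \"ctx._source.likes + 1\"}"))
-    .get();
---------------------------------------------------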
-
-==== Scripting engines now register only a single language
-
-Prior to 5.0.0, script engines could register multiple languages. The Javascript
-script engine in particular registered both `"lang": "js"` and `"lang":
-"javascript"`. Script engines can now only register a single language. All
-references to `"lang": "js"` should be changed to `"lang": "javascript"` for
-existing users of the lang-javascript plugin.
-
-==== Scripting engines now register only a single extension
-
-Prior to 5.0.0 scripting engines could register multiple extensions. The only
-engine doing this was the Javascript engine, which registered "js" and
-"javascript". It now only registers the "js" file extension for on-disk scripts.
-
-==== `.javascript` files are no longer supported (use `.js`)
-
-The Javascript engine previously registered "js" and "javascript". It now only
-registers the "js" file extension for on-disk scripts.
-
-==== Removed scripting query string parameters from update rest api
-
-The `script`, `script_id` and `scripting_upsert` query string parameters have been removed from the update api.
-
-==== Java transport client
-
-The `TemplateQueryBuilder` has been moved to the `lang-mustache` module.
-Therefore, when using the `TemplateQueryBuilder` from the Java native client, the
-lang-mustache module should be on the classpath. Also, the transport client
-should load the lang-mustache module as a plugin:
-
-[source,java]
---------------------------------------------------
-TransportClient transportClient = TransportClient.builder()
- .settings(Settings.builder().put("node.name", "node"))
- .addPlugin(MustachePlugin.class)
- .build();
-transportClient.addTransportAddress(
- new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))
-);
---------------------------------------------------
-
-Also, the helper methods in the `QueryBuilders` class that create a `TemplateQueryBuilder` instance have been removed;
-instead the constructors on `TemplateQueryBuilder` should be used.
-
-==== Template query
-
-The `template` query has been deprecated in favour of the search template api. The `template` query is scheduled
-to be removed in the next major version.
-
-==== GeoPoint scripts
-
-The following helper methods have been removed from GeoPoint scripting:
-
-* `factorDistance`
-* `factorDistanceWithDefault`
-* `factorDistance02`
-* `factorDistance13`
-* `arcDistanceInKm`
-* `arcDistanceInKmWithDefault`
-* `arcDistanceInMiles`
-* `arcDistanceInMilesWithDefault`
-* `distanceWithDefault`
-* `distanceInKm`
-* `distanceInKmWithDefault`
-* `distanceInMiles`
-* `distanceInMilesWithDefault`
-* `geohashDistanceInKm`
-* `geohashDistanceInMiles`
-
-Instead use `arcDistance`, `arcDistanceWithDefault`, `planeDistance`, `planeDistanceWithDefault`, `geohashDistance`,
-`geohashDistanceWithDefault` and convert from default units (meters) to desired units using the appropriate constance
-(e.g., multiply by `0.001` to convert to Km). \ No newline at end of file
diff --git a/docs/reference/migration/migrate_5_0/search.asciidoc b/docs/reference/migration/migrate_5_0/search.asciidoc
deleted file mode 100644
index 09478ee325..0000000000
--- a/docs/reference/migration/migrate_5_0/search.asciidoc
+++ /dev/null
@@ -1,212 +0,0 @@
-[[breaking_50_search_changes]]
-=== Search and Query DSL changes
-
-==== `search_type`
-
-===== `search_type=count` removed
-
-The `count` search type was deprecated since version 2.0.0 and is now removed.
-In order to get the same benefits, you just need to set the value of the `size`
-parameter to `0`.
-
-For instance, the following request:
-
-[source,sh]
----------------
-GET /my_index/_search?search_type=count
-{
- "aggs": {
- "my_terms": {
- "terms": {
- "field": "foo"
- }
- }
- }
-}
----------------
-
-can be replaced with:
-
-[source,sh]
----------------
-GET /my_index/_search
-{
- "size": 0,
- "aggs": {
- "my_terms": {
- "terms": {
- "field": "foo"
- }
- }
- }
-}
----------------
-
-===== `search_type=scan` removed
-
-The `scan` search type was deprecated since version 2.1.0 and is now removed.
-All benefits from this search type can now be achieved by doing a scroll
-request that sorts documents in `_doc` order, for instance:
-
-[source,sh]
----------------
-GET /my_index/_search?scroll=2m
-{
- "sort": [
- "_doc"
- ]
-}
----------------
-
-Scroll requests sorted by `_doc` have been optimized to more efficiently resume
-from where the previous request stopped, so this will have the same performance
-characteristics as the former `scan` search type.
-
-==== `fields` parameter
-
-The `fields` parameter has been replaced by `stored_fields`.
-The `stored_fields` parameter will only return stored fields
--- it will no longer extract values from the `_source`.
-
-==== `fielddata_fields` parameter
-
-The `fielddata_fields` parameter has been deprecated; use the `docvalue_fields` parameter instead.
-
-
-==== search-exists API removed
-
-The search exists api has been removed in favour of using the search api with
-`size` set to `0` and `terminate_after` set to `1`.
-
-
-==== Deprecated queries removed
-
-The following deprecated queries have been removed:
-
-`filtered`:: Use `bool` query instead, which supports `filter` clauses too.
-`and`:: Use `must` clauses in a `bool` query instead.
-`or`:: Use `should` clauses in a `bool` query instead.
-`limit`:: Use the `terminate_after` parameter instead.
-`fquery`:: Is obsolete after filters and queries have been merged.
-`query`:: Is obsolete after filters and queries have been merged.
-`query_binary`:: Was undocumented and has been removed.
-`filter_binary`:: Was undocumented and has been removed.
-
-
-==== Changes to queries
-
-* Unsupported queries such as term queries on `geo_point` fields will now fail
- rather than returning no hits.
-
-* Removed support for fuzzy queries on numeric, date and ip fields, use range
- queries instead.
-
-* Removed support for range and prefix queries on `_uid` and `_id` fields.
-
-* Querying an unindexed field will now fail rather than returning no hits.
-
-* Removed support for the deprecated `min_similarity` parameter in `fuzzy
- query`, in favour of `fuzziness`.
-
-* Removed support for the deprecated `fuzzy_min_sim` parameter in
- `query_string` query, in favour of `fuzziness`.
-
-* Removed support for the deprecated `edit_distance` parameter in completion
- suggester, in favour of `fuzziness`.
-
-* Removed support for the deprecated `filter` and `no_match_filter` fields in `indices` query,
-in favour of `query` and `no_match_query`.
-
-* Removed support for the deprecated `filter` fields in `nested` query, in favour of `query`.
-
-* Removed support for the deprecated `minimum_should_match` and
- `disable_coord` in `terms` query, use `bool` query instead. Also removed
- support for the deprecated `execution` parameter.
-
-* Removed support for the top level `filter` element in `function_score` query, replaced by `query`.
-
-* The `collect_payloads` parameter of the `span_near` query has been deprecated. Payloads will be loaded when needed.
-
-* The `score_type` parameter to the `nested` and `has_child` queries has been
- removed in favour of `score_mode`. The `score_mode` parameter to `has_parent`
- has been deprecated in favour of the `score` boolean parameter. Also, the
- `total` score mode has been removed in favour of the `sum` mode.
-
-* When the `max_children` parameter was set to `0` on the `has_child` query
- then there was no upper limit on how many child documents were allowed to
- match. Now, `0` really means that zero child documents are allowed. If no
- upper limit is needed then the `max_children` parameter shouldn't be specified
- at all.
-
-* The `exists` query will now fail if the `_field_names` field is disabled.
-
-* The `multi_match` query will fail if `fuzziness` is used for `cross_fields`, `phrase` or `phrase_prefix` type.
-This parameter was undocumented and silently ignored before for these types of `multi_match`.
-
-* Deprecated support for the `coerce`, `normalize` and `ignore_malformed` parameters in
-  `GeoPolygonQuery`, `GeoDistanceRangeQuery`, `GeoDistanceQuery` and `GeoBoundingBoxQuery`.
-  Use the `validation_method` parameter instead.
-
-==== Top level `filter` parameter
-
-Removed support for the deprecated top level `filter` in the search api,
-replaced by `post_filter`.
-
-==== Highlighters
-
-Removed support for multiple highlighter names; the only supported ones are
-`plain`, `fvh` and `postings`.
-
-==== Term vectors API
-
-The term vectors APIs no longer persist unmapped fields in the mappings.
-
-The `dfs` parameter to the term vectors API has been removed completely. Term
-vectors don't support distributed document frequencies anymore.
-
-==== Sort
-
-The `reverse` parameter has been removed, in favour of explicitly
-specifying the sort order with the `order` option.
-
-The `coerce` and `ignore_malformed` parameters were deprecated in favour of `validation_method`.
-
-==== Inner hits
-
-* Top level inner hits syntax has been removed. Inner hits can now only be specified as part of the `nested`,
-`has_child` and `has_parent` queries. Use cases previously only possible with top level inner hits can now be done
-with inner hits defined inside the query dsl.
-
-* Source filtering for inner hits inside nested queries requires full field names instead of relative field names.
-This is now consistent with source filtering elsewhere in the search API.
-
-* Nested inner hits will now no longer include `_index`, `_type` and `_id` keys. For nested inner hits these values
-are always the same as the `_index`, `_type` and `_id` keys of the root search hit.
-
-* Parent/child inner hits will now no longer include the `_index` key. For parent/child inner hits the `_index` key is
-always the same as that of the parent search hit.
-
-==== Query Profiler
-
-In the response for profiling queries, the `query_type` has been renamed to `type` and `lucene` has been renamed to
-`description`. These changes have been made so the response format is more friendly to supporting other types of profiling
-in the future.
-
-==== Search preferences
-
-The <<search-request-preference,search preference>> `_only_node` has
-been removed. The same behavior can be achieved by using `_only_nodes`
-and specifying a single node ID.
-
-The <<search-request-preference,search preference>> `_prefer_node` has
-been superseded by `_prefer_nodes`. By specifying a single node,
-`_prefer_nodes` provides the same functionality as `_prefer_node` but
-also supports specifying multiple nodes.
-
-==== Default similarity
-
-The default similarity has been changed to `BM25`.
diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc
deleted file mode 100644
index f6875f60c6..0000000000
--- a/docs/reference/migration/migrate_5_0/settings.asciidoc
+++ /dev/null
@@ -1,323 +0,0 @@
-[[breaking_50_settings_changes]]
-=== Settings changes
-
-From Elasticsearch 5.0 on, all settings are validated before they are applied.
-Node-level and default index-level settings are validated on node startup;
-dynamic cluster and index settings are validated before they are updated/added
-to the cluster state.
-
-Every setting must be a *known* setting. All settings must have been
-registered with the node or transport client they are used with. This implies
-that plugins that define custom settings must register all of their settings
-during plugin loading using the `SettingsModule#registerSettings(Setting)`
-method.
-
-==== Index Level Settings
-
-In previous versions Elasticsearch allowed index level settings to be specified
-as _defaults_ on the node level, inside the `elasticsearch.yml` file, or even via
-command-line parameters. From Elasticsearch 5.0 on, only selected settings, such
-as `index.codec`, can be set on the node level. All other settings must be
-set on each individual index. To set default values on every index, index templates
-should be used instead.
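-
-A minimal template sketch (the template name and the chosen setting are
-illustrative):
-
-[source,js]
---------------------------------------------------
-PUT /_template/default_replicas
-{
-  "template": "*",
-  "settings": {
-    "index.number_of_replicas": 1
-  }
-}
---------------------------------------------------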
-
-==== Node settings
-
-The `name` setting has been removed and is replaced by `node.name`. Usage of
-`-Dname=some_node_name` is not supported anymore.
-
-The `node.add_id_to_custom_path` setting was renamed to `node.add_lock_id_to_custom_path`.
-
-The default for the `node.name` setting is now the first 7 characters of the node id,
-which is in turn a randomly generated UUID.
-
-The settings `node.mode` and `node.local` are removed. Local mode should be configured via
-`discovery.type: local` and `transport.type: local`. In order to disable _http_, please use `http.enabled: false`.
-
-==== Node attribute settings
-
-Node level attributes used for allocation filtering, forced awareness or other node identification / grouping
-must be prefixed with `node.attr`. In previous versions it was possible to specify node attributes with the `node.`
-prefix. All node attributes except for `node.master`, `node.data` and `node.ingest` must be moved to the new `node.attr.`
-namespace.
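-
-For example, a hypothetical `rack` attribute would move as follows (a sketch of
-`elasticsearch.yml`):
-
-[source,yaml]
---------------------------------------------------
-# Before 5.0:
-# node.rack: rack1
-
-# From 5.0 on:
-node.attr.rack: rack1
---------------------------------------------------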
-
-==== Node types settings
-
-The `node.client` setting has been removed. A node with this setting set will not
-start up. Instead, each node role needs to be set separately using the existing
-`node.master`, `node.data` and `node.ingest` static settings.
-
-==== Gateway settings
-
-The `gateway.format` setting for configuring global and index state serialization
-format has been removed. By default, `smile` is used as the format.
-
-==== Transport Settings
-
-All settings with a `netty` infix have been replaced by their already existing
-`transport` synonyms. For instance `transport.netty.bind_host` is no longer
-supported and should be replaced by the superseding setting
-`transport.bind_host`.
-
-==== Security manager settings
-
-The option to disable the security manager `security.manager.enabled` has been
-removed. In order to grant special permissions to Elasticsearch, users must
-edit the local Java Security Policy.
-
-==== Network settings
-
-The `_non_loopback_` value for settings like `network.host` has been removed. It
-would arbitrarily pick the first interface not marked as loopback. Instead,
-specify by address scope (e.g. `_local_,_site_` for all loopback and private
-network addresses) or by explicit interface names, hostnames, or addresses.
-
-The `netty.epollBugWorkaround` setting has been removed. This setting allowed
-people to enable a Netty workaround for https://github.com/netty/netty/issues/327[a high CPU usage issue]
-with early JVM versions. This bug was http://bugs.java.com/view_bug.do?bug_id=6403933[fixed in Java 7].
-Since Elasticsearch 5.0 requires Java 8, the setting has been removed. Note that if the workaround
-needs to be reintroduced you can still set the `org.jboss.netty.epollBugWorkaround` system property
-to control Netty directly.
-
-==== Forbid changing of thread pool types
-
-Previously, <<modules-threadpool,thread pool types>> could be dynamically
-adjusted. The thread pool type effectively controls the backing queue for the
-thread pool and modifying this is an expert setting with minimal practical
-benefits and high risk of being misused. The ability to change the thread pool
-type for any thread pool has been removed. It is still possible to adjust
-relevant thread pool parameters for each of the thread pools (e.g., depending
-on the thread pool type, `keep_alive`, `queue_size`, etc.).
-
-==== Threadpool settings
-
-The `suggest` threadpool has been removed, now suggest requests use the
-`search` threadpool.
-
-The prefix on all thread pool settings has been changed from
-`threadpool` to `thread_pool`.
-
-The minimum size setting for a scaling thread pool has been changed
-from `min` to `core`.
-
-The maximum size setting for a scaling thread pool has been changed
-from `size` to `max`.
-
-The queue size setting for a fixed thread pool must be `queue_size`
-(all other variants that were previously supported are no longer
-supported).
-
-Thread pool settings are now node-level settings. As such, it is not
-possible to update thread pool settings via the cluster settings API.
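-
-For example, a minimal `elasticsearch.yml` sketch using the new keys (the pools
-and values are illustrative):
-
-[source,yaml]
---------------------------------------------------
-# fixed thread pool: size and queue_size
-thread_pool.search.size: 20
-thread_pool.search.queue_size: 500
-
-# scaling thread pool: core and max
-thread_pool.generic.core: 4
-thread_pool.generic.max: 128
---------------------------------------------------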
-
-==== Analysis settings
-
-The `index.analysis.analyzer.default_index` analyzer is not supported anymore.
-If you wish to change the analyzer to use for indexing, change the
-`index.analysis.analyzer.default` analyzer instead.
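-
-For example, a sketch of setting the default analyzer at index creation time
-(the `whitespace` choice is illustrative):
-
-[source,js]
---------------------------------------------------
-PUT /my_index
-{
-  "settings": {
-    "analysis": {
-      "analyzer": {
-        "default": {
-          "type": "whitespace"
-        }
-      }
-    }
-  }
-}
---------------------------------------------------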
-
-==== Ping settings
-
-Previously, there were three settings for the ping timeout:
-`discovery.zen.initial_ping_timeout`, `discovery.zen.ping.timeout` and
-`discovery.zen.ping_timeout`. The former two have been removed and the only
-setting key for the ping timeout is now `discovery.zen.ping_timeout`. The
-default value for ping timeouts remains at three seconds.
-
-
-`discovery.zen.master_election.filter_client` and `discovery.zen.master_election.filter_data` have
-been removed in favor of the new `discovery.zen.master_election.ignore_non_master_pings`. This setting
-controls how ping responses are interpreted during master election and should be used with care and
-only in extreme cases. See the documentation for details.
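-
-A minimal `elasticsearch.yml` sketch of the new setting:
-
-[source,yaml]
---------------------------------------------------
-discovery.zen.master_election.ignore_non_master_pings: true
---------------------------------------------------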
-
-==== Recovery settings
-
-Recovery settings deprecated in 1.x have been removed:
-
- * `index.shard.recovery.translog_size` is superseded by `indices.recovery.translog_size`
- * `index.shard.recovery.translog_ops` is superseded by `indices.recovery.translog_ops`
- * `index.shard.recovery.file_chunk_size` is superseded by `indices.recovery.file_chunk_size`
- * `index.shard.recovery.concurrent_streams` is superseded by `indices.recovery.concurrent_streams`
- * `index.shard.recovery.concurrent_small_file_streams` is superseded by `indices.recovery.concurrent_small_file_streams`
- * `indices.recovery.max_size_per_sec` is superseded by `indices.recovery.max_bytes_per_sec`
-
-If you are using any of these settings please take the time to review their
-purpose. All of the settings above are considered _expert settings_ and should
-only be used if absolutely necessary. If you have set any of the above settings
-as persistent cluster settings, please use the settings update API and set
-their superseding keys accordingly.
-
-The following settings have been removed without replacement:
-
- * `indices.recovery.concurrent_small_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries is throttled via allocation deciders
- * `indices.recovery.concurrent_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries is throttled via allocation deciders
-
-==== Translog settings
-
-The `index.translog.flush_threshold_ops` setting is not supported anymore. In
-order to control flushes based on transaction log growth, use
-`index.translog.flush_threshold_size` instead.
-
-Changing the translog type with `index.translog.fs.type` is not supported
-anymore; the `buffered` implementation is now the only available option and
-uses a fixed `8kb` buffer.
-
-The translog by default is fsynced after every `index`, `create`, `update`,
-`delete`, or `bulk` request. The ability to fsync on every operation is not
-necessary anymore. In fact, it can be a performance bottleneck and it's trappy
-since it is enabled by a special value set on `index.translog.sync_interval`.
-Now, `index.translog.sync_interval` doesn't accept a value less than `100ms`
-which prevents fsyncing too often if async durability is enabled. The special
-value `0` is no longer supported.
-
-`index.translog.interval` has been removed.
-
-==== Request Cache Settings
-
-The deprecated settings `index.cache.query.enable` and
-`indices.cache.query.size` have been removed and are replaced with
-`index.requests.cache.enable` and `indices.requests.cache.size` respectively.
-
-`indices.requests.cache.clean_interval` has been replaced with
-`indices.cache.clean_interval`; the old setting is no longer supported.
-
-==== Field Data Cache Settings
-
-The `indices.fielddata.cache.clean_interval` setting has been replaced with
-`indices.cache.clean_interval`.
-
-==== Allocation settings
-
-The `cluster.routing.allocation.concurrent_recoveries` setting has been
-replaced with `cluster.routing.allocation.node_concurrent_recoveries`.
-
-==== Similarity settings
-
-The `default` similarity has been renamed to `classic`.
-
-==== Indexing settings
-
-The `indices.memory.min_shard_index_buffer_size` and
-`indices.memory.max_shard_index_buffer_size` have been removed as
-Elasticsearch now allows any one shard to use any amount of heap as long as the
-total indexing buffer heap used across all shards is below the node's
-`indices.memory.index_buffer_size` (defaults to 10% of the JVM heap).
-
-==== Removed es.max-open-files
-
-The ability to set the system property `es.max-open-files` to `true` to have
-Elasticsearch print the maximum number of open files for the
-Elasticsearch process has been removed. The same information can be
-obtained from the <<cluster-nodes-info>> API, and a warning is logged
-on startup if it is set too low.
-
-==== Removed es.netty.gathering
-
-Netty could be prevented from using NIO gathering via the escape
-hatch of setting the system property `es.netty.gathering` to `false`.
-Time has proven that enabling gathering by default is a non-issue, and this
-undocumented setting has been removed.
-
-==== Removed es.useLinkedTransferQueue
-
-The system property `es.useLinkedTransferQueue` could be used to
-control the queue implementation used in the cluster service and the
-handling of ping responses during discovery. This was an undocumented
-setting and has been removed.
-
-==== Cache concurrency level settings removed
-
-The two cache concurrency level settings
-`indices.requests.cache.concurrency_level` and
-`indices.fielddata.cache.concurrency_level` have been removed because they no
-longer apply to the cache implementation used for the request cache and the
-field data cache.
-
-==== Using system properties to configure Elasticsearch
-
-Elasticsearch can no longer be configured by setting system properties.
-Instead, use `-Ename.of.setting=value.of.setting`.
-
-==== Removed using double-dashes to configure Elasticsearch
-
-Elasticsearch could previously be configured on the command line by
-setting settings via `--name.of.setting value.of.setting`. This feature
-has been removed. Instead, use `-Ename.of.setting=value.of.setting`.
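-
-For example, a sketch of passing settings on the command line (the values are
-illustrative):
-
-[source,sh]
---------------------------------------------------
-./bin/elasticsearch -Ecluster.name=my_cluster -Enode.name=node_1
---------------------------------------------------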
-
-==== Remove support for .properties config files
-
-The Elasticsearch configuration and logging configuration can no longer be stored in the Java
-properties file format (line-delimited key=value pairs with a `.properties` extension).
-
-==== Discovery Settings
-
-The `discovery.zen.minimum_master_nodes` setting must be set for nodes that have
-`network.host`, `network.bind_host`, `network.publish_host`,
-`transport.host`, `transport.bind_host`, or `transport.publish_host`
-configuration options set. We consider those nodes to be in "production" mode
-and thus require the setting.
-
-==== Realtime get setting
-
-The `action.get.realtime` setting has been removed. This setting was
-a fallback realtime setting for the get and mget APIs when realtime
-wasn't specified. Now, if the parameter isn't specified, we always
-default to `true`.
-
-==== Script settings
-
-===== Indexed script settings
-
-Because indexed scripts have been replaced by stored scripts, the following
-settings have been renamed (where `*` represents the script language, such as
-`groovy`, `mustache` or `painless`):
-
-* `script.indexed` has been replaced by `script.stored`
-* `script.engine.*.indexed.aggs` has been replaced by `script.engine.*.stored.aggs`
-* `script.engine.*.indexed.mapping` has been replaced by `script.engine.*.stored.mapping`
-* `script.engine.*.indexed.search` has been replaced by `script.engine.*.stored.search`
-* `script.engine.*.indexed.update` has been replaced by `script.engine.*.stored.update`
-* `script.engine.*.indexed.plugin` has been replaced by `script.engine.*.stored.plugin`
-
-===== Script mode settings
-
-Previously, script mode settings (e.g., `script.inline: true`,
-`script.engine.groovy.inline.aggs: false`, etc.) accepted a wide range of
-"truthy" or "falsy" values. This is now much stricter and supports only the
-`true` and `false` options.
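-
-For example, a minimal `elasticsearch.yml` sketch using the strict boolean
-values:
-
-[source,yaml]
---------------------------------------------------
-script.inline: true
-script.stored: true
-script.file: false
---------------------------------------------------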
-
-
-===== Script sandbox settings removed
-
-Prior to 5.0 a third option could be specified for the `script.inline` and
-`script.stored` settings (`sandbox`). This has been removed. You can now only
-set `script.inline: true` or `script.stored: true`.
-
-==== Search settings
-
-The setting `index.query.bool.max_clause_count` has been removed. In order to
-set the maximum number of boolean clauses, `indices.query.bool.max_clause_count`
-should be used instead.
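-
-A minimal `elasticsearch.yml` sketch (the value is illustrative):
-
-[source,yaml]
---------------------------------------------------
-indices.query.bool.max_clause_count: 2048
---------------------------------------------------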
-
-==== Memory lock settings
-
-The setting `bootstrap.mlockall` has been renamed to
-`bootstrap.memory_lock`.
-
-==== Snapshot settings
-
-The default setting `include_global_state` for restoring snapshots has been
-changed from `true` to `false`. It has not been changed for taking snapshots and
-still defaults to `true` in that case.
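-
-For example, a sketch of explicitly restoring the global state under the new
-default (the repository and snapshot names are hypothetical):
-
-[source,js]
---------------------------------------------------
-POST /_snapshot/my_backup/snapshot_1/_restore
-{
-  "include_global_state": true
-}
---------------------------------------------------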
-
-==== Time value parsing
-
-The unit `w` representing weeks is no longer supported.
-
-Fractional time values (e.g., `0.5s`) are no longer supported. For example, this means when setting
-timeouts, `0.5s` will be rejected and should instead be written as `500ms`.
-
-==== Node max local storage nodes
-
-Previous versions of Elasticsearch defaulted to allowing multiple nodes to share the same data
-directory (up to 50). This can be confusing when users accidentally start up multiple nodes and end
-up thinking that they've lost data, because the second node will start with an empty data directory.
-While the default of allowing multiple nodes is friendly for experimenting with forming a small cluster on
-a laptop, and end-users do sometimes run multiple nodes on the same host, this tends to be the
-exception. In keeping with Elasticsearch's continual movement towards safer out-of-the-box defaults,
-and optimizing for the norm instead of the exception, the default for
-`node.max_local_storage_nodes` is now one.
diff --git a/docs/reference/migration/migrate_5_0/suggest.asciidoc b/docs/reference/migration/migrate_5_0/suggest.asciidoc
deleted file mode 100644
index 0b67711fe0..0000000000
--- a/docs/reference/migration/migrate_5_0/suggest.asciidoc
+++ /dev/null
@@ -1,91 +0,0 @@
-[[breaking_50_suggester]]
-=== Suggester changes
-
-The completion suggester has undergone a complete rewrite. This means that the
-syntax and data structure for fields of type `completion` have changed, as
-have the syntax and response of completion suggester requests.
-
-For indices created before Elasticsearch 5.0.0, `completion` fields and the
-completion suggester will continue to work as they did in Elasticsearch 2.x.
-However, it is not possible to run a completion suggester query across indices
-created in 2.x and indices created in 5.x.
-
-It is strongly recommended to reindex indices containing 2.x `completion`
-fields in 5.x to take advantage of the new features listed below.
-
-NOTE: You will need to change the structure of the completion field values
-when reindexing.
-
-==== Completion suggester is near-real time
-
-Previously, deleted suggestions could be included in results even
-after refreshing an index. Now, deletions are visible in near-real
-time, i.e. as soon as the index has been refreshed. This applies
-to suggestion entries for both context and completion suggesters.
-
-==== Completion suggester is document-oriented
-
-Suggestions are aware of the document they belong to. This enables
-retrieving any field value from the document. This is exposed
-through the query-time `payload` option in `completion` and `context`
-suggesters:
-
-[source,sh]
----------------
-GET /my_index/_search
-{
- "suggest": {
- "fooSuggestion": {
-    "text": "f",
- "completion": {
- "field": "fooSuggest",
- "payload": ["field1", "field2"]
- }
- }
- }
-}
----------------
-
-Previously, `context` and `completion` suggesters supported an index-time
-`payloads` option, which was used to store and return metadata with suggestions.
-Now metadata can be stored as a field in the same document as the
-suggestion, enabling retrieval at query time. The support for
-index-time `payloads` has been removed to avoid bloating the in-memory
-index with suggestion metadata. The time that it takes to retrieve payloads
-depends heavily on the size of the `_source` field. The smaller the `_source`,
-the faster the retrieval.
-
-==== Simpler completion indexing
-
-As suggestions are document-oriented, suggestion metadata (e.g. `output`)
-should now be specified as a field in the document. The support for specifying
-`output` when indexing suggestion entries has been removed. Now a suggestion
-result entry's `text` is always the un-analyzed value of the suggestion's
-`input` (the same as not specifying `output` while indexing suggestions in pre-5.0
-indices).
-
-==== Completion mapping with multiple contexts
-
-The `context` option in `completion` field mapping is now an array to support
-multiple named contexts per completion field. Note that this is sugar for
-indexing the same suggestions under different names with different contexts.
-The `default` option for a named `context` has been removed. Now querying with
-no `context` against a context-enabled completion field yields results from all
-indexed suggestions. Note that performance for a match-all-context query
-degrades with the number of unique context values for a given `completion` field.
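-
-For example, a mapping sketch with two named contexts (assuming the 5.x
-`contexts` mapping syntax; the field and context names are illustrative):
-
-[source,js]
---------------------------------------------------
-PUT /my_index
-{
-  "mappings": {
-    "my_type": {
-      "properties": {
-        "fooSuggest": {
-          "type": "completion",
-          "contexts": [
-            { "name": "color", "type": "category" },
-            { "name": "location", "type": "geo" }
-          ]
-        }
-      }
-    }
-  }
-}
---------------------------------------------------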
-
-==== Completion suggestion with multiple context filtering
-
-Previously, the `context` option in a suggest request was used for filtering suggestions
-by `context` value. Now, the option has been renamed to `contexts` to specify
-multiple named context filters. Note that this is not supported by pre-5.0 indices.
-The following is the `contexts` snippet for a suggest query filtered by both `color`
-and `location` contexts:
-
-[source,sh]
----------------
-"contexts": {
- "color": [ {...} ],
- "location": [ {...} ]
-}
----------------
diff --git a/docs/reference/migration/migrate_6_0.asciidoc b/docs/reference/migration/migrate_6_0.asciidoc
new file mode 100644
index 0000000000..735455d98c
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0.asciidoc
@@ -0,0 +1,41 @@
+[[breaking-changes-6.0]]
+== Breaking changes in 6.0
+
+This section discusses the changes that you need to be aware of when migrating
+your application to Elasticsearch 6.0.
+
+[float]
+=== Indices created before 6.0
+
+Elasticsearch 6.0 can read indices created in version 5.0 or above. An
+Elasticsearch 6.0 node will not start in the presence of indices created in a
+version of Elasticsearch before 5.0.
+
+[IMPORTANT]
+.Reindex indices from Elasticsearch 2.x or before
+=========================================
+
+Indices created in Elasticsearch 2.x or before will need to be reindexed with
+Elasticsearch 5.x in order to be readable by Elasticsearch 6.x. The easiest
+way to reindex old indices is to use the `reindex` API.
+
+=========================================
+
+[float]
+=== Also see:
+
+* <<breaking_60_rest_changes>>
+* <<breaking_60_search_changes>>
+* <<breaking_60_docs_changes>>
+* <<breaking_60_cluster_changes>>
+* <<breaking_60_plugins_changes>>
+
+include::migrate_6_0/rest.asciidoc[]
+
+include::migrate_6_0/search.asciidoc[]
+
+include::migrate_6_0/docs.asciidoc[]
+
+include::migrate_6_0/cluster.asciidoc[]
+
+include::migrate_6_0/plugins.asciidoc[]
diff --git a/docs/reference/migration/migrate_6_0/cluster.asciidoc b/docs/reference/migration/migrate_6_0/cluster.asciidoc
new file mode 100644
index 0000000000..bd070d8d1f
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/cluster.asciidoc
@@ -0,0 +1,27 @@
+[[breaking_60_cluster_changes]]
+=== Cluster changes
+
+==== Cluster name no longer allowed in path.data
+
+Previously the cluster name could be used in the `path.data` setting with a
+warning. This is no longer allowed. For instance, in the previous version
+this was valid:
+
+[source,sh]
+--------------------------------------------------
+# Assuming path.data is /tmp/mydata
+# No longer supported:
+$ tree /tmp/mydata
+/tmp/mydata
+├── <cluster_name>
+│   └── nodes
+│   └── 0
+│   └── <etc>
+
+# Should be changed to:
+$ tree /tmp/mydata
+/tmp/mydata
+├── nodes
+│   └── 0
+│   └── <etc>
+--------------------------------------------------
diff --git a/docs/reference/migration/migrate_6_0/docs.asciidoc b/docs/reference/migration/migrate_6_0/docs.asciidoc
new file mode 100644
index 0000000000..5d19c000ad
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/docs.asciidoc
@@ -0,0 +1,7 @@
+[[breaking_60_docs_changes]]
+=== Document API changes
+
+==== version type 'force' removed
+
+Document modification operations may no longer specify the `version_type` of
+`force` to override any previous version checks.
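+
+For example, a sketch using external versioning, which remains supported (the
+index, type, id and version are hypothetical):
+
+[source,js]
+--------------------------------------------------
+PUT /my_index/my_type/1?version=5&version_type=external
+{
+  "title": "updated"
+}
+--------------------------------------------------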
diff --git a/docs/reference/migration/migrate_6_0/plugins.asciidoc b/docs/reference/migration/migrate_6_0/plugins.asciidoc
new file mode 100644
index 0000000000..ff8a75ab44
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/plugins.asciidoc
@@ -0,0 +1,7 @@
+[[breaking_60_plugins_changes]]
+=== Plugins changes
+
+==== Mapper attachments plugin
+
+* The mapper attachments plugin has been deprecated in Elasticsearch 5.0 and is now removed.
+You can use the {plugins}/ingest-attachment.html[ingest attachment plugin] instead.
diff --git a/docs/reference/migration/migrate_6_0/rest.asciidoc b/docs/reference/migration/migrate_6_0/rest.asciidoc
new file mode 100644
index 0000000000..1e02df1f61
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/rest.asciidoc
@@ -0,0 +1,9 @@
+[[breaking_60_rest_changes]]
+=== REST changes
+
+==== Unquoted JSON
+
+In previous versions of Elasticsearch, JSON documents were allowed to contain unquoted field names.
+This feature was removed in the 5.x series, but a backwards-compatibility layer was added via the
+system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compatibility layer
+has been removed in Elasticsearch 6.0.0.
diff --git a/docs/reference/migration/migrate_6_0/search.asciidoc b/docs/reference/migration/migrate_6_0/search.asciidoc
new file mode 100644
index 0000000000..5a98edd6d1
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/search.asciidoc
@@ -0,0 +1,7 @@
+[[breaking_60_search_changes]]
+=== Search and Query DSL changes
+
+==== Changes to queries
+
+* The `collect_payloads` parameter of the `span_near` query has been removed. Payloads will be
+  loaded when needed, as in the sketch below.
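+
+For example, a sketch of a `span_near` query without the removed parameter (the
+field and terms are hypothetical):
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+  "query": {
+    "span_near": {
+      "clauses": [
+        { "span_term": { "body": "quick" } },
+        { "span_term": { "body": "fox" } }
+      ],
+      "slop": 2,
+      "in_order": false
+    }
+  }
+}
+--------------------------------------------------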
diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc
index 57b6e8ea5c..1d35025557 100644
--- a/docs/reference/modules/cluster/misc.asciidoc
+++ b/docs/reference/modules/cluster/misc.asciidoc
@@ -20,18 +20,18 @@ API can make the cluster read-write again.
[[cluster-max-tombstones]]
==== Index Tombstones
-The cluster state maintains index tombstones to explicitly denote indices that
-have been deleted. The number of tombstones maintained in the cluster state is
+The cluster state maintains index tombstones to explicitly denote indices that
+have been deleted. The number of tombstones maintained in the cluster state is
controlled by the following property, which cannot be updated dynamically:
`cluster.indices.tombstones.size`::
-Index tombstones prevent nodes that are not part of the cluster when a delete
-occurs from joining the cluster and reimporting the index as though the delete
-was never issued. To keep the cluster state from growing huge we only keep the
-last `cluster.indices.tombstones.size` deletes, which defaults to 500. You can
-increase it if you expect nodes to be absent from the cluster and miss more
-than 500 deletes. We think that is rare, thus the default. Tombstones don't take
+Index tombstones prevent nodes that are not part of the cluster when a delete
+occurs from joining the cluster and reimporting the index as though the delete
+was never issued. To keep the cluster state from growing huge we only keep the
+last `cluster.indices.tombstones.size` deletes, which defaults to 500. You can
+increase it if you expect nodes to be absent from the cluster and miss more
+than 500 deletes. We think that is rare, thus the default. Tombstones don't take
up much space, but we also think that a number like 50,000 is probably too big.
[[cluster-logger]]
@@ -46,8 +46,8 @@ The settings which control logging can be updated dynamically with the
PUT /_cluster/settings
{
"transient": {
- "logger.indices.recovery": "DEBUG"
+ "logger.org.elasticsearch.indices.recovery": "DEBUG"
}
}
-------------------------------
-
+// CONSOLE
diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc
index c5b2197903..48a77816a3 100644
--- a/docs/reference/modules/cluster/shards_allocation.asciidoc
+++ b/docs/reference/modules/cluster/shards_allocation.asciidoc
@@ -85,7 +85,11 @@ Specify when shard rebalancing is allowed:
`cluster.routing.allocation.cluster_concurrent_rebalance`::
Allow to control how many concurrent shard rebalances are
- allowed cluster wide. Defaults to `2`.
+ allowed cluster wide. Defaults to `2`. Note that this setting
+ only controls the number of concurrent shard relocations due
+ to imbalances in the cluster. This setting does not limit shard
+ relocations due to <<allocation-filtering,allocation filtering>>
+ or <<forced-awareness,forced awareness>>.
[float]
=== Shard Balancing Heuristics
diff --git a/docs/reference/modules/indices/request_cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc
index 4969224bd2..22c203b486 100644
--- a/docs/reference/modules/indices/request_cache.asciidoc
+++ b/docs/reference/modules/indices/request_cache.asciidoc
@@ -42,8 +42,10 @@ The cache can be expired manually with the <<indices-clearcache,`clear-cache` AP
[source,js]
------------------------
-curl -XPOST 'localhost:9200/kimchy,elasticsearch/_cache/clear?request_cache=true'
+POST /kimchy,elasticsearch/_cache/clear?request_cache=true
------------------------
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
[float]
==== Enabling and disabling caching
@@ -53,24 +55,26 @@ index as follows:
[source,js]
-----------------------------
-curl -XPUT localhost:9200/my_index -d'
+PUT /my_index
{
"settings": {
"index.requests.cache.enable": false
}
}
-'
-----------------------------
+// CONSOLE
It can also be enabled or disabled dynamically on an existing index with the
<<indices-update-settings,`update-settings`>> API:
[source,js]
-----------------------------
-curl -XPUT localhost:9200/my_index/_settings -d'
+PUT /my_index/_settings
{ "index.requests.cache.enable": true }
-'
-----------------------------
+// CONSOLE
+// TEST[continued]
+
[float]
==== Enabling and disabling caching per request
@@ -80,7 +84,7 @@ caching on a *per-request* basis. If set, it overrides the index-level setting:
[source,js]
-----------------------------
-curl 'localhost:9200/my_index/_search?request_cache=true' -d'
+GET /my_index/_search?request_cache=true
{
"size": 0,
"aggs": {
@@ -91,8 +95,9 @@ curl 'localhost:9200/my_index/_search?request_cache=true' -d'
}
}
}
-'
-----------------------------
+// CONSOLE
+// TEST[continued]
IMPORTANT: If your query uses a script whose result is not deterministic (e.g.
it uses a random function or references the current time) you should set the
@@ -137,12 +142,14 @@ by index, with the <<indices-stats,`indices-stats`>> API:
[source,js]
------------------------
-curl 'localhost:9200/_stats/request_cache?pretty&human'
+GET /_stats/request_cache?human
------------------------
+// CONSOLE
or by node with the <<cluster-nodes-stats,`nodes-stats`>> API:
[source,js]
------------------------
-curl 'localhost:9200/_nodes/stats/indices/request_cache?pretty&human'
+GET /_nodes/stats/indices/request_cache?human
------------------------
+// CONSOLE
diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc
index 7606521056..253fab3941 100644
--- a/docs/reference/modules/scripting.asciidoc
+++ b/docs/reference/modules/scripting.asciidoc
@@ -5,12 +5,7 @@ The scripting module enables you to use scripts to evaluate custom
expressions. For example, you could use a script to return "script fields"
as part of a search request or evaluate a custom score for a query.
-TIP: Elasticsearch now has a built-in scripting language called _Painless_
-that provides a more secure alternative for implementing
-scripts for Elasticsearch. We encourage you to try it out --
-for more information, see <<modules-scripting-painless, Painless Scripting Language>>.
-
-The default scripting language is http://groovy-lang.org/[groovy].
+The default scripting language is <<modules-scripting-painless, `Painless`>>.
Additional `lang` plugins enable you to run scripts written in other languages.
Everywhere a script can be used, you can include a `lang` parameter
to specify the language of the script.
diff --git a/docs/reference/modules/scripting/groovy.asciidoc b/docs/reference/modules/scripting/groovy.asciidoc
index 07551474e2..aaacd85f24 100644
--- a/docs/reference/modules/scripting/groovy.asciidoc
+++ b/docs/reference/modules/scripting/groovy.asciidoc
@@ -1,6 +1,8 @@
[[modules-scripting-groovy]]
=== Groovy Scripting Language
+deprecated[5.0.0,Groovy will be replaced by the new scripting language <<modules-scripting-painless, `Painless`>>]
+
Groovy is the default scripting language available in Elasticsearch. Although
limited by the <<java-security-manager,Java Security Manager>>, it is not a
sandboxed language and only `file` scripts may be used by default.
diff --git a/docs/reference/modules/scripting/painless-syntax.asciidoc b/docs/reference/modules/scripting/painless-syntax.asciidoc
index 88c2cdb11a..1191facc36 100644
--- a/docs/reference/modules/scripting/painless-syntax.asciidoc
+++ b/docs/reference/modules/scripting/painless-syntax.asciidoc
@@ -15,7 +15,7 @@ including array types, but adds some additional built-in types.
==== Def
The dynamic type `def` serves as a placeholder for any other type. It adopts the behavior
-of whatever runtime type it represents.
+of whatever runtime type it represents.
[float]
[[painless-strings]]
@@ -23,7 +23,7 @@ of whatever runtime type it represents.
String constants can be declared with single quotes, to avoid escaping horrors with JSON:
-[source,js]
+[source,painless]
---------------------------------------------------------
def mystring = 'foo';
---------------------------------------------------------
@@ -34,14 +34,14 @@ def mystring = 'foo';
Lists can be created explicitly (e.g. `new ArrayList()`) or initialized similar to Groovy:
-[source,js]
+[source,painless]
---------------------------------------------------------
def list = [1,2,3];
---------------------------------------------------------
Lists can also be accessed similar to arrays: they support subscript and `.length`:
-[source,js]
+[source,painless]
---------------------------------------------------------
def list = [1,2,3];
return list[0]
@@ -53,14 +53,14 @@ return list[0]
Maps can be created explicitly (e.g. `new HashMap()`) or initialized similar to Groovy:
-[source,js]
+[source,painless]
---------------------------------------------------------
def person = ['name': 'Joe', 'age': 63];
---------------------------------------------------------
Map keys can also be accessed as properties.
-[source,js]
+[source,painless]
---------------------------------------------------------
def person = ['name': 'Joe', 'age': 63];
person.retired = true;
@@ -69,7 +69,7 @@ return person.name
Map keys can also be accessed via subscript (for keys containing special characters):
-[source,js]
+[source,painless]
---------------------------------------------------------
return map['something-absurd!']
---------------------------------------------------------
@@ -80,13 +80,13 @@ return map['something-absurd!']
Regular expression constants are directly supported:
-[source,js]
+[source,painless]
---------------------------------------------------------
Pattern p = /[aeiou]/
---------------------------------------------------------
Patterns can only be created via this mechanism. This ensures fast performance; regular expressions
-are always constants and compiled efficiently a single time.
+are always constants and compiled efficiently a single time.
[float]
[[modules-scripting-painless-regex-flags]]
@@ -116,7 +116,7 @@ using these characters:
=== Operators
All of Java's https://docs.oracle.com/javase/tutorial/java/nutsandbolts/operators.html[operators] are
-supported with the same precedence, promotion, and semantics.
+supported with the same precedence, promotion, and semantics.
There are only a few minor differences and add-ons:
@@ -134,7 +134,7 @@ of the `switch` statement.
In addition to Java's `enhanced for` loop, the `for in` syntax from groovy can also be used:
-[source,js]
+[source,painless]
---------------------------------------------------------
for (item : list) {
...
@@ -147,7 +147,7 @@ for (item : list) {
Functions can be declared at the beginning of the script, for example:
-[source,js]
+[source,painless]
---------------------------------------------------------
boolean isNegative(def x) { x < 0 }
...
@@ -161,7 +161,7 @@ if (isNegative(someVar)) {
=== Lambda expressions
Lambda expressions and method references work the same as https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html[Java's].
-[source,js]
+[source,painless]
---------------------------------------------------------
list.removeIf(item -> item == 2);
list.removeIf((int item) -> item == 2);
diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/modules/scripting/painless.asciidoc
index ad36cdd6df..7995e45e7b 100644
--- a/docs/reference/modules/scripting/painless.asciidoc
+++ b/docs/reference/modules/scripting/painless.asciidoc
@@ -196,10 +196,19 @@ POST hockey/player/1/_update
[[modules-scripting-painless-regex]]
=== Regular expressions
+NOTE: Regexes are disabled by default because they circumvent Painless's
+protection against long-running and memory-hungry scripts. To make matters
+worse, even innocuous-looking regexes can have staggering performance and stack
+depth behavior. They remain an amazingly powerful tool but are too scary to enable
+by default. To enable them yourself, set `script.painless.regex.enabled: true` in
+`elasticsearch.yml`. We'd very much like to have a safe alternative
+implementation that could be enabled by default, so check this space for later
+developments!
+
Painless's native support for regular expressions has syntax constructs:
* `/pattern/`: Pattern literals create patterns. This is the only way to create
-a pattern in painless. The pattern inside the `/`s are just
+a pattern in painless. The patterns inside the ++/++'s are just
http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[Java regular expressions].
See <<modules-scripting-painless-regex-flags>> for more.
* `=~`: The find operator returns a `boolean`, `true` if a subsequence of the
diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc
index 0c29f82b4d..17ab4a8180 100644
--- a/docs/reference/modules/scripting/using.asciidoc
+++ b/docs/reference/modules/scripting/using.asciidoc
@@ -12,7 +12,7 @@ the same pattern:
"params": { ... } <3>
}
-------------------------------------
-<1> The language the script is written in, which defaults to `groovy`.
+<1> The language the script is written in, which defaults to `painless`.
<2> The script itself which may be specified as `inline`, `id`, or `file`.
<3> Any named parameters that should be passed into the script.
diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc
index 3b3980f7ca..9fd8e06948 100644
--- a/docs/reference/modules/snapshots.asciidoc
+++ b/docs/reference/modules/snapshots.asciidoc
@@ -498,6 +498,12 @@ running snapshot was executed by mistake, or takes unusually long, it can be ter
The snapshot delete operation checks if the deleted snapshot is currently running and if it is, the delete operation stops
that snapshot before deleting the snapshot data from the repository.
+[source,sh]
+-----------------------------------
+DELETE /_snapshot/my_backup/snapshot_1
+-----------------------------------
+// CONSOLE
+
The restore operation uses the standard shard recovery mechanism. Therefore, any currently running restore operation can
be canceled by deleting indices that are being restored. Please note that data for all deleted indices will be removed
from the cluster as a result of this operation.
diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc
index 1c92eb24e4..bf9b6e8f9d 100644
--- a/docs/reference/modules/transport.asciidoc
+++ b/docs/reference/modules/transport.asciidoc
@@ -91,13 +91,13 @@ the JVM. It is automatically enabled when using
=== Transport Tracer
The transport module has a dedicated tracer logger which, when activated, logs incoming and outgoing requests. The log can be dynamically activated
-by settings the level of the `transport.tracer` logger to `TRACE`:
+by setting the level of the `org.elasticsearch.transport.TransportService.tracer` logger to `TRACE`:
[source,js]
--------------------------------------------------
curl -XPUT localhost:9200/_cluster/settings -d '{
"transient" : {
- "logger.transport.tracer" : "TRACE"
+ "logger.org.elasticsearch.transport.TransportService.tracer" : "TRACE"
}
}'
--------------------------------------------------
diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc
index 0b6214396c..3072372d17 100644
--- a/docs/reference/query-dsl/function-score-query.asciidoc
+++ b/docs/reference/query-dsl/function-score-query.asciidoc
@@ -318,7 +318,7 @@ In the above example, the field is a <<geo-point,`geo_point`>> and origin can be
math (for example `now-1h`) is supported for origin.
`scale`::
- Required for all types. Defines the distance from origin at which the computed
+ Required for all types. Defines the distance from origin + offset at which the computed
score will equal `decay` parameter. For geo fields: Can be defined as number+unit (1km, 12m,...).
Default unit is meters. For date fields: Can to be defined as a number+unit ("1h", "10d",...).
Default unit is milliseconds. For numeric field: Any number.
@@ -359,6 +359,7 @@ image:images/Gaussian.png[]
where image:images/sigma.png[] is computed to assure that the score takes the value `decay` at distance `scale` from `origin`+-`offset`
+// \sigma^2 = -scale^2/(2 \cdot ln(decay))
image:images/sigma_calc.png[]
See <<gauss-decay>> for graphs demonstrating the curve generated by the `gauss` function.
@@ -374,6 +375,7 @@ image:images/Exponential.png[]
where again the parameter image:images/lambda.png[] is computed to assure that the score takes the value `decay` at distance `scale` from `origin`+-`offset`
+// \lambda = ln(decay)/scale
image:images/lambda_calc.png[]
See <<exp-decay>> for graphs demonstrating the curve generated by the `exp` function.
diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc
index 5e2f0ce43c..c6496eac39 100644
--- a/docs/reference/query-dsl/geo-distance-query.asciidoc
+++ b/docs/reference/query-dsl/geo-distance-query.asciidoc
@@ -199,7 +199,7 @@ The following are options allowed on the filter:
before the distance check. Defaults to `memory` which will do in memory
checks. Can also have values of `indexed` to use indexed value check (make
sure the `geo_point` type index lat lon in this case), or `none` which
- disables bounding box optimization.
+ disables bounding box optimization. deprecated[2.2]
`_name`::
diff --git a/docs/reference/query-dsl/geo-queries.asciidoc b/docs/reference/query-dsl/geo-queries.asciidoc
index 9aaf286526..0bdf70f5b8 100644
--- a/docs/reference/query-dsl/geo-queries.asciidoc
+++ b/docs/reference/query-dsl/geo-queries.asciidoc
@@ -31,11 +31,6 @@ The queries in this group are:
Find documents with geo-points within the specified polygon.
-<<query-dsl-geohash-cell-query,`geohash_cell`>> query::
-
- Find geo-points whose geohash intersects with the geohash of the specified
- point.
-
include::geo-shape-query.asciidoc[]
@@ -46,5 +41,3 @@ include::geo-distance-query.asciidoc[]
include::geo-distance-range-query.asciidoc[]
include::geo-polygon-query.asciidoc[]
-
-include::geohash-cell-query.asciidoc[]
diff --git a/docs/reference/query-dsl/geohash-cell-query.asciidoc b/docs/reference/query-dsl/geohash-cell-query.asciidoc
deleted file mode 100644
index 27e6319bc7..0000000000
--- a/docs/reference/query-dsl/geohash-cell-query.asciidoc
+++ /dev/null
@@ -1,78 +0,0 @@
-[[query-dsl-geohash-cell-query]]
-=== Geohash Cell Query
-
-The `geohash_cell` query provides access to a hierarchy of geohashes.
-By defining a geohash cell, only <<geo-point,geopoints>>
-within this cell will match this filter.
-
-To make this filter work, all prefixes of a geohash need to be indexed. For
-example, a geohash `u30` needs to be decomposed into three terms: `u30`,
-`u3` and `u`. This decomposition must be enabled in the mapping of the
-<<geo-point,geopoint>> field that's going to be filtered by
-setting the `geohash_prefix` option:
-
-[source,js]
---------------------------------------------------
-PUT /my_index
-{
- "mappings" : {
- "location": {
- "properties": {
- "pin": {
- "type": "geo_point",
- "geohash": true,
- "geohash_prefix": true,
- "geohash_precision": 10
- }
- }
- }
- }
-}
---------------------------------------------------
-// CONSOLE
-// TESTSETUP
-
-The geohash cell can be defined by all formats of `geo_points`. If such a cell is
-defined by a latitude and longitude pair, the size of the cell needs to be
-set up. This can be done via the `precision` parameter of the filter. This
-parameter can be set to an integer value which sets the length of the geohash
-prefix. Instead of setting a geohash length directly it is also possible to
-define the precision as a distance, for example `"precision": "50m"`. (See
-<<distance-units>>.)
-
-The `neighbor` option of the filter offers the possibility to filter cells
-next to the given cell.
-
-[source,js]
---------------------------------------------------
-GET /_search
-{
- "query": {
- "bool" : {
- "must" : {
- "match_all" : {}
- },
- "filter" : {
- "geohash_cell": {
- "pin": {
- "lat": 13.4080,
- "lon": 52.5186
- },
- "precision": 3,
- "neighbors": true
- }
- }
- }
- }
-}
---------------------------------------------------
-// CONSOLE
-
-[float]
-==== Ignore Unmapped
-
-When set to `true` the `ignore_unmapped` option will ignore an unmapped field
-and will not match any documents for this query. This can be useful when
-querying multiple indexes which might have different mappings. When set to
-`false` (the default value) the query will throw an exception if the field
-is not mapped.
diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc
index 3b8352fd4e..1d648a5d29 100644
--- a/docs/reference/query-dsl/has-child-query.asciidoc
+++ b/docs/reference/query-dsl/has-child-query.asciidoc
@@ -93,3 +93,34 @@ and will not match any documents for this query. This can be useful when
querying multiple indexes which might have different mappings. When set to
`false` (the default value) the query will throw an exception if the `type`
is not mapped.
+
+[float]
+==== Sorting
+
+Parent documents can't be sorted by fields in matching child documents via the
+regular sort options. If you need to sort parent documents by a field in the child
+documents then you should use the `function_score` query and then just sort
+by `_score`.
+
+Sorting blogs by child documents' `click_count` field:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+ "query": {
+ "has_child" : {
+ "type" : "blog_tag",
+ "score_mode" : "max",
+ "query" : {
+ "function_score" : {
+ "script_score": {
+ "script": "_score * doc['click_count'].value"
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE \ No newline at end of file
diff --git a/docs/reference/query-dsl/has-parent-query.asciidoc b/docs/reference/query-dsl/has-parent-query.asciidoc
index 202bcaac43..09ec3383cc 100644
--- a/docs/reference/query-dsl/has-parent-query.asciidoc
+++ b/docs/reference/query-dsl/has-parent-query.asciidoc
@@ -63,3 +63,34 @@ and will not match any documents for this query. This can be useful when
querying multiple indexes which might have different mappings. When set to
`false` (the default value) the query will throw an exception if the `type`
is not mapped.
+
+[float]
+==== Sorting
+
+Child documents can't be sorted by fields in matching parent documents via the
+regular sort options. If you need to sort child documents by a field in the parent
+documents then you should use the `function_score` query and then just sort
+by `_score`.
+
+Sorting tags by parent documents' `view_count` field:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+ "query": {
+ "has_parent" : {
+ "parent_type" : "blog",
+ "score" : true,
+ "query" : {
+ "function_score" : {
+ "script_score": {
+ "script": "_score * doc['view_count'].value"
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE \ No newline at end of file
diff --git a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc
index e7e0f61865..40cfabdc96 100644
--- a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc
+++ b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc
@@ -59,6 +59,6 @@ for appears.
For better solutions for _search-as-you-type_ see the
<<search-suggesters-completion,completion suggester>> and
-{guide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type].
+{defguide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type].
===================================================
diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc
index 2ccc84cabb..e53e468f54 100644
--- a/docs/reference/query-dsl/percolate-query.asciidoc
+++ b/docs/reference/query-dsl/percolate-query.asciidoc
@@ -19,7 +19,7 @@ PUT /my-index
"doctype": {
"properties": {
"message": {
- "type": "keyword"
+ "type": "text"
}
}
},
@@ -51,7 +51,7 @@ Register a query in the percolator:
[source,js]
--------------------------------------------------
-PUT /my-index/queries/1
+PUT /my-index/queries/1?refresh
{
"query" : {
"match" : {
@@ -88,7 +88,7 @@ The above request will yield the following response:
[source,js]
--------------------------------------------------
{
- "took": 5,
+ "took": 13,
"timed_out": false,
"_shards": {
"total": 5,
@@ -116,6 +116,7 @@ The above request will yield the following response:
}
}
--------------------------------------------------
+// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/]
<1> The query with id `1` matches our document.
@@ -179,9 +180,11 @@ Index response:
"successful": 1,
"failed": 0
},
- "created": true
+ "created": true,
+ "result": "created"
}
--------------------------------------------------
+// TESTRESPONSE
Percolating an existing document, using the index response as basis to build to new search request:
@@ -226,7 +229,7 @@ Save a query:
[source,js]
--------------------------------------------------
-PUT /my-index/queries/1
+PUT /my-index/queries/1?refresh
{
"query" : {
"match" : {
@@ -242,7 +245,7 @@ Save another query:
[source,js]
--------------------------------------------------
-PUT /my-index/queries/2
+PUT /my-index/queries/2?refresh
{
"query" : {
"match" : {
@@ -284,7 +287,7 @@ This will yield the following response.
[source,js]
--------------------------------------------------
{
- "took": 83,
+ "took": 7,
"timed_out": false,
"_shards": {
"total": 5,
@@ -335,6 +338,7 @@ This will yield the following response.
}
}
--------------------------------------------------
+// TESTRESPONSE[s/"took": 7,/"took": "$body.took",/]
Instead of the query in the search request highlighting the percolator hits, the percolator queries are highlighting
the document defined in the `percolate` query.
diff --git a/docs/reference/query-dsl/span-field-masking-query.asciidoc b/docs/reference/query-dsl/span-field-masking-query.asciidoc
new file mode 100644
index 0000000000..d9e96635a2
--- /dev/null
+++ b/docs/reference/query-dsl/span-field-masking-query.asciidoc
@@ -0,0 +1,43 @@
+[[query-dsl-span-field-masking-query]]
+=== Span Field Masking Query
+
+Wrapper to allow span queries to participate in composite single-field span queries by 'lying' about their search field. The span field masking query maps to Lucene's `SpanFieldMaskingQuery`.
+
+This can be used to support queries like `span-near` or `span-or` across different fields, which is not ordinarily permitted.
+
+The span field masking query is invaluable in conjunction with *multi-fields* when the same content is indexed with multiple analyzers. For instance, we could index a field with the standard analyzer, which breaks text up into words, and again with the english analyzer, which stems words into their root form.
+
+Example:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+ "query": {
+ "span_near": {
+ "clauses": [
+ {
+ "span_term": {
+ "text": "quick brown"
+ }
+ },
+ {
+ "field_masking_span": {
+ "query": {
+ "span_term": {
+ "text.stems": "fox"
+ }
+ },
+ "field": "text"
+ }
+ }
+ ],
+ "slop": 5,
+ "in_order": false
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE
+
+Note: as the span field masking query returns the masked field, scoring will be done using the norms of the field name supplied. This may lead to unexpected scoring behaviour. \ No newline at end of file
diff --git a/docs/reference/query-dsl/span-queries.asciidoc b/docs/reference/query-dsl/span-queries.asciidoc
index 63aad48d98..4a1a019574 100644
--- a/docs/reference/query-dsl/span-queries.asciidoc
+++ b/docs/reference/query-dsl/span-queries.asciidoc
@@ -47,6 +47,9 @@ Accepts a list of span queries, but only returns those spans which also match a
The result from a single span query is returned as long as its span falls
within the spans returned by a list of other span queries.
+<<query-dsl-span-field-masking-query,`field_masking_span` query>>::
+
+Allows queries like `span-near` or `span-or` across different fields.
include::span-term-query.asciidoc[]
@@ -63,3 +66,5 @@ include::span-not-query.asciidoc[]
include::span-containing-query.asciidoc[]
include::span-within-query.asciidoc[]
+
+include::span-field-masking-query.asciidoc[] \ No newline at end of file
diff --git a/docs/reference/query-dsl/wildcard-query.asciidoc b/docs/reference/query-dsl/wildcard-query.asciidoc
index ad82c029d8..ba1c72bb1e 100644
--- a/docs/reference/query-dsl/wildcard-query.asciidoc
+++ b/docs/reference/query-dsl/wildcard-query.asciidoc
@@ -4,7 +4,7 @@
Matches documents that have fields matching a wildcard expression (*not
analyzed*). Supported wildcards are `*`, which matches any character
sequence (including the empty one), and `?`, which matches any single
-character. Note this query can be slow, as it needs to iterate over many
+character. Note that this query can be slow, as it needs to iterate over many
terms. In order to prevent extremely slow wildcard queries, a wildcard
term should not start with one of the wildcards `*` or `?`. The wildcard
query maps to Lucene `WildcardQuery`.
diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc
index 29a9986bc8..570568b43a 100644
--- a/docs/reference/redirects.asciidoc
+++ b/docs/reference/redirects.asciidoc
@@ -131,13 +131,6 @@ The `geo_shape` filter has been replaced by the <<query-dsl-geo-shape-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).
-[role="exclude",id="query-dsl-geohash-cell-filter"]
-=== Geohash Cell Filter
-
-The `geohash_cell` filter has been replaced by the <<query-dsl-geohash-cell-query>>.
-It behaves as a query in ``query context'' and as a filter in ``filter
-context'' (see <<query-dsl>>).
-
[role="exclude",id="query-dsl-has-child-filter"]
=== Has Child Filter
@@ -255,6 +248,12 @@ The `fuzzy_like_this_field` or `flt_field` query has been removed. Instead use
the <<query-dsl-match-query-fuzziness,`fuzziness`>> parameter with the
<<query-dsl-match-query,`match` query>> or the <<query-dsl-mlt-query>>.
+[role="exclude",id="query-dsl-geohash-cell-query"]
+=== Geohash Cell Query
+
+The `geohash_cell` query has been removed. Instead use the
+<<query-dsl-geo-bounding-box-query, Geo Bounding Box Query>>.
+
[role="exclude",id="search-more-like-this"]
=== More Like This API
@@ -377,6 +376,7 @@ GET _search
}
}
-------------------------
+// NOTCONSOLE
move the query and filter to the `must` and `filter` parameters in the `bool`
query:
@@ -401,6 +401,7 @@ GET _search
}
}
-------------------------
+// CONSOLE
[role="exclude",id="query-dsl-or-query"]
=== Or query
diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc
index d8daa87754..267525b1b3 100644
--- a/docs/reference/release-notes.asciidoc
+++ b/docs/reference/release-notes.asciidoc
@@ -3,19 +3,5 @@
[partintro]
--
-This section summarizes the changes in each release.
-
-* <<release-notes-5.0.0-alpha5>>
-* <<release-notes-5.0.0-alpha4>>
-* <<release-notes-5.0.0-alpha3>>
-* <<release-notes-5.0.0-alpha2>>
-* <<release-notes-5.0.0-alpha1>>
-* <<release-notes-5.0.0-alpha1-2x>>
-
+This section will summarize the changes in released versions.
--
-include::release-notes/5.0.0-alpha5.asciidoc[]
-include::release-notes/5.0.0-alpha4.asciidoc[]
-include::release-notes/5.0.0-alpha3.asciidoc[]
-include::release-notes/5.0.0-alpha2.asciidoc[]
-include::release-notes/5.0.0-alpha1.asciidoc[]
-include::release-notes/5.0.0-alpha1-2x.asciidoc[]
diff --git a/docs/reference/release-notes/5.0.0-alpha1-2x.asciidoc b/docs/reference/release-notes/5.0.0-alpha1-2x.asciidoc
deleted file mode 100644
index 6c5e23b55f..0000000000
--- a/docs/reference/release-notes/5.0.0-alpha1-2x.asciidoc
+++ /dev/null
@@ -1,604 +0,0 @@
-[[release-notes-5.0.0-alpha1-2x]]
-== 5.0.0-alpha1 Release Notes (Changes previously released in 2.x)
-
-The changes listed below were first released in the 2.x series. Changes
-released for the first time in Elasticsearch 5.0.0-alpha1 are listed in
-<<release-notes-5.0.0-alpha1>>.
-
-[[breaking-5.0.0-alpha1-2x]]
-[float]
-=== Breaking changes
-
-Index APIs::
-* Change Field stats API response to include both number and string based min and max values {pull}14674[#14674] (issue: {issue}14404[#14404])
-* Add Force Merge API, deprecate Optimize API {pull}13778[#13778]
-
-Internal::
-* Forbid changing thread pool types {pull}14367[#14367] (issues: {issue}14294[#14294], {issue}2509[#2509], {issue}2858[#2858], {issue}5152[#5152])
-
-Logging::
-* Log cluster health status changes {pull}14557[#14557] (issue: {issue}11657[#11657])
-
-Mapping::
-* Add per-index setting to limit number of nested fields {pull}15989[#15989] (issue: {issue}14983[#14983])
-
-Nested Docs::
-* If sorting by nested field then the `nested_path` should always be specified {pull}13429[#13429] (issue: {issue}13420[#13420])
-
-Scripting::
-* Filter classes loaded by scripts {pull}15262[#15262]
-* Lock down javascript and python script engines better {pull}13924[#13924]
-
-Search::
-* Limit the size of the result window to a dynamic property {pull}13188[#13188] (issue: {issue}9311[#9311])
-
-
-[[breaking-java-5.0.0-alpha1-2x]]
-[float]
-=== Breaking Java changes
-
-Allocation::
-* Speed up shard balancer by reusing shard model while moving shards that can no longer be allocated to a node {pull}16926[#16926]
-
-
-[[feature-5.0.0-alpha1-2x]]
-[float]
-=== New features
-
-Aggregations::
-* Adds geo_centroid metric aggregator {pull}13846[#13846] (issue: {issue}13621[#13621])
-* Add `percentiles_bucket` pipeline aggregation {pull}13186[#13186]
-* Add `stats_bucket` / `extended_stats_bucket` pipeline aggs {pull}13128[#13128]
-
-Geo::
-* Add CONTAINS relation to geo_shape query {pull}14810[#14810] (issue: {issue}14713[#14713])
-* Add support for Lucene 5.4 GeoPoint queries {pull}14537[#14537]
-* Add GeoPointV2 Field Mapping {pull}14536[#14536]
-
-Network::
-* Allow binding to multiple addresses. {pull}13954[#13954] (issue: {issue}13592[#13592])
-
-Plugin Analysis Phonetic::
-* Add support for `daitch_mokotoff` {pull}14834[#14834]
-
-Plugin Cloud AWS::
-* Add support for S3 storage class {pull}13656[#13656] (issue: {issue}13655[#13655])
-
-Plugins::
-* Decentralize plugin security {pull}14108[#14108]
-
-Search::
-* Add query profiler {pull}14889[#14889] (issues: {issue}12974[#12974], {issue}6699[#6699])
-
-
-
-[[enhancement-5.0.0-alpha1-2x]]
-[float]
-=== Enhancements
-
-Aliases::
-* Add support to _aliases endpoint to specify multiple indices and aliases in one action {pull}15305[#15305] (issue: {issue}15186[#15186])
-
-Allocation::
-* Skip capturing least/most FS info for an FS with no total {pull}16001[#16001] (issue: {issue}15919[#15919])
-* Speed improvements for BalancedShardsAllocator {pull}15678[#15678] (issue: {issue}6372[#6372])
-* Simplify delayed shard allocation {pull}14808[#14808]
-* Add cluster-wide setting for total shard limit {pull}14563[#14563] (issue: {issue}14456[#14456])
-* Early terminate high disk watermark checks on single data node cluster {pull}13882[#13882] (issue: {issue}9391[#9391])
-* Also use PriorityComparator in shard balancer {pull}13256[#13256] (issue: {issue}13249[#13249])
-* Add support for filtering by publish IP address {pull}8801[#8801]
-
-Analysis::
-* Add detail response support for _analyze API {pull}11660[#11660] (issue: {issue}11076[#11076])
-
-CAT API::
-* Add sync_id to cat shards API {pull}14712[#14712] (issue: {issue}14705[#14705])
-* Add duration field to /_cat/snapshots {pull}14385[#14385]
-* Add cat API for repositories and snapshots {pull}14247[#14247] (issue: {issue}13919[#13919])
-* Adds disk used by indices to _cat/allocation {pull}13783[#13783] (issue: {issue}13529[#13529])
-
-Cluster::
-* Shard state action request logging {pull}16396[#16396]
-* Safe cluster state task notifications {pull}15777[#15777]
-* Reroute once per batch of shard failures {pull}15510[#15510]
-* Add callback for publication of new cluster state {pull}15494[#15494] (issue: {issue}15482[#15482])
-* Use general cluster state batching mechanism for shard started {pull}15023[#15023] (issues: {issue}14725[#14725], {issue}14899[#14899])
-* Use general cluster state batching mechanism for shard failures {pull}15016[#15016] (issues: {issue}14725[#14725], {issue}14899[#14899])
-* Set a newly created IndexShard's ShardRouting before exposing it to operations {pull}14918[#14918] (issue: {issue}10708[#10708])
-* Uniform exceptions for TransportMasterNodeAction {pull}14737[#14737]
-
-Core::
-* Remove log4j exception hiding {pull}16834[#16834]
-* Avoid cloning MessageDigest instances {pull}16479[#16479]
-* Add a hard check to ensure we are running with the expected lucene version {pull}16305[#16305] (issue: {issue}16301[#16301])
-* If we can't get a MAC address for the node, use a dummy one {pull}15266[#15266] (issue: {issue}10099[#10099])
-* Simplify shard inactive logging {pull}15259[#15259] (issue: {issue}15252[#15252])
-* Simplify IndexingMemoryController#checkIdle {pull}15252[#15252] (issue: {issue}15251[#15251])
-* IndexingMemoryController should not track shard index states {pull}15251[#15251] (issues: {issue}13918[#13918], {issue}15225[#15225])
-* Verify Checksum once it has been fully written to fail as soon as possible {pull}13896[#13896]
-
-Discovery::
-* Don't allow nodes with missing custom meta data to join cluster {pull}15401[#15401] (issue: {issue}13445[#13445])
-
-Exceptions::
-* Added file name to exceptions when failing to read index state {pull}16850[#16850] (issue: {issue}16713[#16713])
-* Add Exception class name to message in `NotSerializableExceptionWrapper` {pull}16325[#16325]
-* Deduplicate cause if already contained in shard failures {pull}14432[#14432]
-* Give a better exception when running from freebsd jail without enforce_statfs=1 {pull}14135[#14135] (issue: {issue}12018[#12018])
-* Make root_cause of field conflicts more obvious {pull}13976[#13976] (issue: {issue}12839[#12839])
-* Use a dedicated id to serialize EsExceptions instead of its class name. {pull}13629[#13629]
-
-Fielddata::
-* Update GeoPoint FieldData for GeoPointV2 {pull}14345[#14345]
-
-Geo::
-* Upgrade GeoPointField to use Lucene 5.5 PrefixEncoding {pull}16482[#16482]
-* Geo: Fix toString() in GeoDistanceRangeQuery and GeoPolygonQuery {pull}15026[#15026]
-* Enable GeoPointV2 with backward compatibility testing {pull}14667[#14667] (issues: {issue}10761[#10761], {issue}11159[#11159], {issue}9859[#9859])
-* Refactor Geo utilities to Lucene 5.4 {pull}14339[#14339]
-
-Index APIs::
-* Add option to disable closing indices {pull}14169[#14169] (issue: {issue}14168[#14168])
-
-Index Templates::
-* Disallow index template pattern to be the same as an alias name {pull}15184[#15184] (issue: {issue}14842[#14842])
-
-Internal::
-* Implement available for all StreamInput classes {pull}17218[#17218]
-* Cleanup search sub transport actions and collapse o.e.action.search.type package into o.e.action.search {pull}16758[#16758] (issue: {issue}11710[#11710])
-* Simplify the Text API. {pull}15511[#15511]
-* Simplify the compressed oops flag representation {pull}15509[#15509] (issue: {issue}15489[#15489])
-* Info on compressed ordinary object pointers {pull}15489[#15489] (issues: {issue}13187[#13187], {issue}455[#455])
-* Explicitly log cluster state update failures {pull}15428[#15428] (issues: {issue}14899[#14899], {issue}15016[#15016], {issue}15023[#15023])
-* Use transport service to handle RetryOnReplicaException to execute replica action on the current node {pull}15363[#15363]
-* Make IndexShard operation be more explicit about whether they are expected to run on a primary or replica {pull}15282[#15282]
-* Avoid trace logging allocations in TransportBroadcastByNodeAction {pull}15221[#15221]
-* Only trace log shard not available exceptions {pull}14950[#14950] (issue: {issue}14927[#14927])
-* Transport options should be immutable {pull}14760[#14760]
-* Fix dangling comma in ClusterBlock#toString {pull}14483[#14483]
-* Improve some logging around master election and cluster state {pull}14481[#14481]
-* Add System#exit(), Runtime#exit() and Runtime#halt() to forbidden APIs {pull}14473[#14473] (issue: {issue}12596[#12596])
-* Simplify XContent detection. {pull}14472[#14472]
-* Add threadgroup isolation. {pull}14353[#14353]
-* Cleanup plugin security {pull}14311[#14311]
-* Add workaround for JDK-8014008 {pull}14274[#14274]
-* Refactor retry logic for TransportMasterNodeAction {pull}14222[#14222]
-* Remove MetaDataService and its semaphores {pull}14159[#14159] (issue: {issue}1296[#1296])
-* Cleanup IndexMetaData {pull}14119[#14119]
-* Add SpecialPermission to guard exceptions to security policy. {pull}13854[#13854]
-* Clean up scripting permissions. {pull}13844[#13844]
-* Factor groovy out of core into lang-groovy {pull}13834[#13834] (issue: {issue}13725[#13725])
-* More helpful error message on parameter order {pull}13737[#13737]
-* Factor expressions scripts out to lang-expression plugin {pull}13726[#13726] (issue: {issue}13725[#13725])
-* Cleanup InternalClusterInfoService {pull}13543[#13543]
-* Remove and forbid use of com.google.common.base.Throwables {pull}13409[#13409] (issue: {issue}13224[#13224])
-* Remove cyclic dependencies between IndexService and FieldData / BitSet caches {pull}13381[#13381]
-* Remove and forbid use of com.google.common.base.Objects {pull}13355[#13355] (issue: {issue}13224[#13224])
-* Enable indy (invokedynamic) compile flag for Groovy scripts by default {pull}8201[#8201] (issue: {issue}8184[#8184])
-
-Java API::
-* Add created flag to IndexingOperationListener#postIndex {pull}17340[#17340] (issue: {issue}17333[#17333])
-* Align handling of interrupts in BulkProcessor {pull}15527[#15527] (issue: {issue}14833[#14833])
-* BulkProcessor backs off exponentially by default {pull}15513[#15513] (issue: {issue}14829[#14829])
-* Reject refresh usage in bulk items when using and fix NPE when no source {pull}15082[#15082] (issue: {issue}7361[#7361])
-* BulkProcessor retries after request handling has been rejected due to a full thread pool {pull}14829[#14829] (issue: {issue}14620[#14620])
-
-Logging::
-* Log suppressed stack traces under DEBUG {pull}16627[#16627] (issues: {issue}12991[#12991], {issue}15329[#15329], {issue}16622[#16622])
-* Add circuit breaker name to logging package {pull}14661[#14661]
-* Move logging for the amount of free disk to TRACE {pull}14403[#14403] (issue: {issue}12843[#12843])
-* Map log-level 'trace' to JDK-Level 'FINEST' {pull}14234[#14234]
-
-Mapping::
-* Expose the reason why a mapping merge is issued. {pull}16059[#16059] (issue: {issue}15989[#15989])
-* Add sub-fields support to `bool` fields. {pull}15636[#15636] (issue: {issue}6587[#6587])
-* Improve cross-type dynamic mapping updates. {pull}15633[#15633] (issue: {issue}15568[#15568])
-* Make mapping updates more robust. {pull}15539[#15539]
-* Make mapping serialization more robust. {pull}15480[#15480]
-* Make mappings immutable. {pull}15313[#15313] (issue: {issue}9365[#9365])
-* Make MappedFieldType.checkTypeName part of MappedFieldType.checkCompatibility. {pull}15245[#15245]
-* Register field mappers at the node level. {pull}14896[#14896] (issue: {issue}14828[#14828])
-
-Network::
-* Provide better error message when an incompatible node connects to a node {pull}17182[#17182] (issue: {issue}17090[#17090])
-* Add additional fallback to http.publish_port and restrict fallback to transport.publish_port {pull}16626[#16626] (issue: {issue}14535[#14535])
-* only allow code to bind to the user's configured port numbers/ranges {pull}14549[#14549]
-* Port of publishAddress should match port of corresponding boundAddress {pull}14535[#14535] (issues: {issue}14503[#14503], {issue}14513[#14513], {issue}14514[#14514])
-
-Packaging::
-* Windows service: Use JAVA_HOME environment variable in registry {pull}16552[#16552] (issue: {issue}13521[#13521])
-* Default standard output to the journal in systemd {pull}16159[#16159] (issues: {issue}15315[#15315], {issue}16134[#16134])
-* Use egrep instead of grep -E for Solaris {pull}15755[#15755] (issue: {issue}15628[#15628])
-* punch thru symlinks when loading plugins/modules {pull}15311[#15311]
-* set ActiveProcessLimit=1 on windows {pull}15055[#15055]
-* set RLIMIT_NPROC = 0 on bsd/os X systems. {pull}15039[#15039]
-* Drop ability to execute on Solaris {pull}14200[#14200]
-* Nuke ES_CLASSPATH appending, JarHell fail on empty classpath elements {pull}13880[#13880] (issues: {issue}13812[#13812], {issue}13864[#13864])
-* improve seccomp syscall filtering {pull}13829[#13829]
-* Block process execution with seccomp on linux/amd64 {pull}13753[#13753]
-* Get lang-javascript, lang-python, securemock ready for script refactoring {pull}13695[#13695]
-* Remove some bogus permissions only needed for tests. {pull}13620[#13620]
-* Remove java.lang.reflect.ReflectPermission "suppressAccessChecks" {pull}13603[#13603]
-* Remove JAVA_HOME detection from the debian init script {pull}13514[#13514] (issues: {issue}13403[#13403], {issue}9774[#9774])
-
-Plugin Cloud AWS::
-* Add ap-northeast-2 (seoul) endpoints for EC2 discovery and S3 snapshots {pull}16167[#16167] (issue: {issue}16166[#16166])
-* Add aws canned acl {pull}14297[#14297] (issue: {issue}14103[#14103])
-* Improved building of disco nodes {pull}14155[#14155]
-* Enable S3SignerType {pull}13360[#13360] (issue: {issue}13332[#13332])
-
-Plugin Cloud Azure::
-* Add support for secondary azure storage account {pull}13779[#13779] (issue: {issue}13228[#13228])
-
-Plugin Cloud GCE::
-* cloud-gce plugin should check `discovery.type` {pull}13809[#13809] (issue: {issue}13614[#13614])
-* Adding backoff from retries on GCE errors {pull}13671[#13671] (issue: {issue}13460[#13460])
-
-Plugin Discovery EC2::
-* Adding US-Gov-West {pull}14358[#14358]
-
-Plugin Ingest Attachment::
-* Fix attachments plugins with docx {pull}17059[#17059] (issue: {issue}16864[#16864])
-
-Plugins::
-* Expose http.type setting, and collapse al(most all) modules relating to transport/http {pull}15434[#15434] (issue: {issue}14148[#14148])
-* Ban RuntimePermission("getClassLoader") {pull}15253[#15253]
-* Add nicer error message when a plugin descriptor is missing {pull}15200[#15200] (issue: {issue}15197[#15197])
-* Don't be lenient in PluginService#processModule(Module) {pull}14306[#14306]
-* Adds a validation for plugins script to check if java is set {pull}13633[#13633] (issue: {issue}13613[#13613])
-* Output plugin info only in verbose mode {pull}12908[#12908] (issue: {issue}12907[#12907])
-
-Query DSL::
-* Allow CIDR notation in query string query {pull}14773[#14773] (issue: {issue}7464[#7464])
-* Internal: simplify filtered query conversion to lucene query {pull}13312[#13312] (issue: {issue}13272[#13272])
-
-REST::
-* Make XContentGenerator.writeRaw* safer. {pull}15358[#15358]
-* Filter path refactoring {pull}14390[#14390] (issues: {issue}10980[#10980], {issue}11560[#11560], {issue}13344[#13344])
-
-Recovery::
-* Handle cancel exceptions on recovery target if the cancel comes from the source {pull}15309[#15309]
-* Decouple routing and primary operation logic in TransportReplicationAction {pull}14852[#14852]
-
-Reindex API::
-* Implement helpful interfaces in reindex requests {pull}17032[#17032]
-* Reindex should timeout if sub-requests timeout {pull}16962[#16962]
-* Teach reindex to retry on rejection {pull}16556[#16556] (issue: {issue}16093[#16093])
-
-Scripting::
-* Remove suppressAccessChecks permission for Groovy script plugin {pull}16839[#16839] (issue: {issue}16527[#16527])
-* Class permission for Groovy references {pull}16660[#16660] (issue: {issue}16657[#16657])
-* Scripting: Allow to get size of array in mustache {pull}16193[#16193]
-* Enhancements to the mustache script engine {pull}15661[#15661]
-* Add property permissions so groovy scripts can serialize json {pull}14500[#14500] (issue: {issue}14488[#14488])
-* Remove ScriptEngineService.unwrap. {pull}13958[#13958]
-* Remove ScriptEngineService.execute. {pull}13956[#13956]
-
-Search::
-* Caching Weight wrappers should propagate the BulkScorer. {pull}14317[#14317]
-* fix numerical issue in function score query {pull}14085[#14085]
-* Optimize scrolls for constant-score queries. {pull}13311[#13311]
-
-Settings::
-* Log warning if max file descriptors too low {pull}16506[#16506]
-
-Snapshot/Restore::
-* Support wildcards for getting repositories and snapshots {pull}15151[#15151] (issue: {issue}4758[#4758])
-* Add ignore_unavailable parameter to skip unavailable snapshot {pull}14471[#14471] (issue: {issue}13887[#13887])
-* Simplify the BlobContainer blob writing interface {pull}13434[#13434]
-
-Stats::
-* Pull Fields instance once from LeafReader in completion stats {pull}15090[#15090] (issue: {issue}6593[#6593])
-* Add os.allocated_processors stats {pull}14409[#14409] (issue: {issue}13917[#13917])
-* Adds stats counter for failed indexing requests {pull}13130[#13130] (issue: {issue}8938[#8938])
-
-Top Hits::
-* Put method addField on TopHitsBuilder {pull}14597[#14597] (issue: {issue}12962[#12962])
-
-Translog::
-* Check for tragic event on all kinds of exceptions not only ACE and IOException {pull}15535[#15535]
-
-Tribe Node::
-* Tribe nodes should apply cluster state updates in batches {pull}14993[#14993] (issues: {issue}14725[#14725], {issue}14899[#14899])
-
-
-
-[[bug-5.0.0-alpha1-2x]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Prevents exception being raised when ordering by an aggregation which wasn't collected {pull}17379[#17379] (issue: {issue}17225[#17225])
-* Setting 'other' bucket on empty aggregation {pull}17264[#17264] (issue: {issue}16546[#16546])
-* Build empty extended stats aggregation if no docs collected for bucket {pull}16972[#16972] (issues: {issue}16812[#16812], {issue}9544[#9544])
-* Set meta data for pipeline aggregations {pull}16516[#16516] (issue: {issue}16484[#16484])
-* Filter(s) aggregation should create weights only once. {pull}15998[#15998]
-* Make `missing` on terms aggs work with all execution modes. {pull}15746[#15746] (issue: {issue}14882[#14882])
-* Run pipeline aggregations for empty buckets added in the Range Aggregation {pull}15519[#15519] (issue: {issue}15471[#15471])
-* [Children agg] fix bug that prevented all child docs from being evaluated {pull}15457[#15457]
-* Correct typo in class name of StatsAggregator {pull}15321[#15321] (issue: {issue}14730[#14730])
-* Fix significant terms reduce for long terms {pull}14948[#14948] (issue: {issue}13522[#13522])
-* Fix NPE in Derivative Pipeline when current bucket value is null {pull}14745[#14745]
-* Pass extended bounds into HistogramAggregator when creating an unmapped aggregator {pull}14742[#14742] (issue: {issue}14735[#14735])
-* Added correct generic type parameter on ScriptedMetricBuilder {pull}14018[#14018] (issue: {issue}13986[#13986])
-* Pipeline Aggregations at the root of the agg tree are now validated {pull}13475[#13475] (issue: {issue}13179[#13179])
-
-Aliases::
-* Fix _aliases filter and null parameters {pull}16553[#16553] (issues: {issue}16547[#16547], {issue}16549[#16549])
-
-Allocation::
-* IndicesStore checks for `allocated elsewhere` for every shard not allocated on the local node {pull}17106[#17106]
-* Prevent peer recovery from node with older version {pull}15775[#15775]
-* Fix calculation of next delay for delayed shard allocation {pull}14765[#14765]
-* Take ignored unallocated shards into account when making allocation decision {pull}14678[#14678] (issue: {issue}14670[#14670])
-* Only allow rebalance operations to run if all shard store data is available {pull}14591[#14591] (issue: {issue}14387[#14387])
-* Delayed allocation can miss a reroute {pull}14494[#14494] (issues: {issue}14010[#14010], {issue}14011[#14011], {issue}14445[#14445])
-* Check rebalancing constraints when shards are moved from a node they can no longer remain on {pull}14259[#14259] (issue: {issue}14057[#14057])
-
-Analysis::
-* Analysis: Allow string explain param in JSON {pull}16977[#16977] (issue: {issue}16925[#16925])
-* Analysis: Fix no response from Analyze API without specified index {pull}15447[#15447] (issue: {issue}15148[#15148])
-
-Bulk::
-* Bulk api: fail deletes when routing is required but not specified {pull}16675[#16675] (issues: {issue}10136[#10136], {issue}16645[#16645])
-* Do not release unacquired semaphore {pull}14909[#14909] (issue: {issue}14908[#14908])
-
-CAT API::
-* Fix column aliases in _cat/indices, _cat/nodes and _cat/shards APIs {pull}17145[#17145] (issue: {issue}17101[#17101])
-* Properly set indices and indicesOptions on subrequest made by /_cat/indices {pull}14360[#14360]
-
-CRUD::
-* Throw exception if content type could not be determined in Update API {pull}15904[#15904] (issue: {issue}15822[#15822])
-* Index name expressions should not be broken up {pull}13691[#13691] (issue: {issue}13665[#13665])
-
-Cache::
-* Handle closed readers in ShardCoreKeyMap {pull}16027[#16027]
-
-Cluster::
-* Index deletes not applied when cluster UUID has changed {pull}16825[#16825] (issue: {issue}11665[#11665])
-* Only fail the relocation target when a replication request on it fails {pull}15791[#15791] (issue: {issue}15790[#15790])
-* Handle shards assigned to nodes that are not in the cluster state {pull}14586[#14586] (issue: {issue}14584[#14584])
-* Bulk cluster state updates on index deletion {pull}11258[#11258] (issue: {issue}7295[#7295])
-
-Core::
-* BitSetFilterCache duplicates its content. {pull}15836[#15836] (issue: {issue}15820[#15820])
-* Limit the max size of bulk and index thread pools to bounded number of processors {pull}15585[#15585] (issue: {issue}15582[#15582])
-* AllTermQuery's scorer should skip segments that never saw the requested term {pull}15506[#15506]
-* Include root-cause exception when we fail to change shard's index buffer {pull}14867[#14867]
-* Restore thread interrupt flag after an InterruptedException {pull}14799[#14799] (issue: {issue}14798[#14798])
-* Record all bytes of the checksum in VerifyingIndexOutput {pull}13923[#13923] (issues: {issue}13848[#13848], {issue}13896[#13896])
-* When shard becomes active again, immediately increase its indexing buffer {pull}13918[#13918] (issue: {issue}13802[#13802])
-* Close TokenStream in finally clause {pull}13870[#13870] (issue: {issue}11947[#11947])
-* LoggingRunnable.run should catch and log all errors, not just Exception? {pull}13718[#13718] (issue: {issue}13487[#13487])
-
-Exceptions::
-* Fix ensureNodesAreAvailable's error message {pull}14007[#14007] (issue: {issue}13957[#13957])
-
-Expressions::
-* Check that _value is used in aggregations script before setting value to specialValue {pull}17091[#17091] (issue: {issue}14262[#14262])
-
-Fielddata::
-* Don't cache top level field data for fields that don't exist {pull}14693[#14693]
-
-Geo::
-* Remove .geohash suffix from GeoDistanceQuery and GeoDistanceRangeQuery {pull}15871[#15871] (issue: {issue}15179[#15179])
-* Geo: Allow numeric parameters enclosed in quotes for 'geohash_grid' aggregation {pull}14440[#14440] (issue: {issue}13132[#13132])
-* Resync Geopoint hashCode/equals method {pull}14124[#14124] (issue: {issue}14083[#14083])
-* Fix GeoPointFieldMapper to index geohash at correct precision. {pull}13649[#13649] (issue: {issue}12467[#12467])
-
-Highlighting::
-* Don't override originalQuery with request filters {pull}15793[#15793] (issue: {issue}15689[#15689])
-* Fix spans extraction to not also include individual terms. {pull}15516[#15516] (issues: {issue}13239[#13239], {issue}15291[#15291])
-
-Index APIs::
-* Field stats: Index constraints should remove indices in the response if the field to evaluate is empty {pull}14868[#14868]
-* Field stats: Fix NPE for index constraint on empty index {pull}14841[#14841]
-* Field stats: Added `format` option for index constraints {pull}14823[#14823] (issue: {issue}14804[#14804])
-* Forbid index name `.` and `..` {pull}13862[#13862] (issue: {issue}13858[#13858])
-
-Inner Hits::
-* Query and top level inner hit definitions shouldn't overwrite each other {pull}16222[#16222] (issue: {issue}16218[#16218])
-
-Internal::
-* Log uncaught exceptions from scheduled once tasks {pull}15824[#15824] (issue: {issue}15814[#15814])
-* FunctionScoreQuery should implement two-phase iteration. {pull}15602[#15602]
-* Make sure the remaining delay of unassigned shard is updated with every reroute {pull}14890[#14890] (issue: {issue}14808[#14808])
-* Throw a meaningful error when loading metadata and an alias and index have the same name {pull}14842[#14842] (issue: {issue}14706[#14706])
-* fixup issues with 32-bit jvm {pull}14609[#14609]
-* Failure to update the cluster state with the recovered state should make sure it will be recovered later {pull}14485[#14485]
-* Gateway: a race condition can prevent the initial cluster state from being recovered {pull}13997[#13997]
-* Verify actually written checksum in VerifyingIndexOutput {pull}13848[#13848]
-* An inactive shard is activated by triggered synced flush {pull}13802[#13802]
-* Remove all setAccessible in tests and forbid {pull}13539[#13539]
-* Remove easy uses of setAccessible in tests. {pull}13537[#13537]
-* Ban setAccessible from core code, restore monitoring stats under java 9 {pull}13531[#13531] (issue: {issue}13527[#13527])
-
-Logging::
-* Add missing index name to indexing slow log {pull}17026[#17026] (issue: {issue}17025[#17025])
-* ParseFieldMatcher should log when using deprecated settings. {pull}16988[#16988]
-* Don't log multi-megabyte guice exceptions. {pull}13782[#13782]
-* Moving system property setting to before it can be used {pull}13660[#13660] (issue: {issue}13658[#13658])
-
-Mapping::
-* Put mapping operations must update metadata of all types. {pull}16264[#16264] (issue: {issue}16239[#16239])
-* Fix serialization of `search_analyzer`. {pull}16255[#16255]
-* Reuse metadata mappers for dynamic updates. {pull}16023[#16023] (issue: {issue}15997[#15997])
-* Fix MapperService#searchFilter(...) {pull}15923[#15923] (issue: {issue}15757[#15757])
-* Fix initial sizing of BytesStreamOutput. {pull}15864[#15864] (issue: {issue}15789[#15789])
-* MetaDataMappingService should call MapperService.merge with the original mapping update. {pull}15508[#15508]
-* MapperService: check index.mapper.dynamic during index creation {pull}15424[#15424] (issue: {issue}15381[#15381])
-* Only text fields should accept analyzer and term vector settings. {pull}15308[#15308]
-* Mapper parsers should not check for a `tokenized` property. {pull}15289[#15289]
-* Validate that fields are defined only once. {pull}15243[#15243] (issue: {issue}15057[#15057])
-* Check mapping compatibility up-front. {pull}15175[#15175] (issue: {issue}15049[#15049])
-* Don't treat _default_ as a regular type. {pull}15156[#15156] (issue: {issue}15049[#15049])
-* Don't ignore mapping merge failures. {pull}15144[#15144] (issue: {issue}15049[#15049])
-* Treat mappings as an index-level feature. {pull}15142[#15142]
-* Make _type use doc values {pull}14783[#14783] (issue: {issue}14781[#14781])
-
-Network::
-* Only accept transport requests after node is fully initialized {pull}16746[#16746] (issue: {issue}16723[#16723])
-
-Packaging::
-* Fix waiting for pidfile {pull}16718[#16718] (issue: {issue}16717[#16717])
-* Fix Windows service installation failure {pull}15549[#15549] (issue: {issue}15349[#15349])
-* Enable es_include at init {pull}15173[#15173]
-* Handle system policy correctly {pull}14704[#14704] (issue: {issue}14690[#14690])
-* Startup script exit status should catch daemonized startup failures {pull}14170[#14170] (issue: {issue}14163[#14163])
-* Don't let ubuntu try to install its crazy jayatana agent. {pull}13813[#13813] (issue: {issue}13785[#13785])
-
-Parent/Child::
-* Check that parent_type in Has Parent Query has child types {pull}16923[#16923] (issue: {issue}16692[#16692])
-* Has child query forces default similarity {pull}16611[#16611] (issues: {issue}16550[#16550], {issue}4977[#4977])
-
-Percolator::
-* Don't replace found fields if map unmapped fields as string is enabled {pull}16043[#16043] (issue: {issue}10500[#10500])
-* mpercolate api should serialise start time {pull}15938[#15938] (issue: {issue}15908[#15908])
-
-Plugin Delete By Query::
-* Fix Delete-by-Query with Shield {pull}14658[#14658] (issue: {issue}14527[#14527])
-
-Plugin Discovery GCE::
-* Add setFactory permission to GceDiscoveryPlugin {pull}16860[#16860] (issue: {issue}16485[#16485])
-
-Plugin Mapper Attachment::
-* Fix toXContent() for mapper attachments field {pull}15110[#15110]
-
-Plugin Repository S3::
-* Hack around aws security hole of accessing sun.security.ssl, s3 repository works on java 9 again {pull}13538[#13538] (issue: {issue}432[#432])
-
-Plugins::
-* Fix plugin list command error message {pull}14288[#14288] (issue: {issue}14287[#14287])
-* Fix HTML response during redirection {pull}11374[#11374] (issue: {issue}11370[#11370])
-
-Query DSL::
-* Fix FunctionScore equals/hashCode to include minScore and friends {pull}15676[#15676]
-* Min should match greater than the number of optional clauses should return no result {pull}15571[#15571] (issue: {issue}15521[#15521])
-* Return a better exception message when `regexp` query is used on a numeric field {pull}14910[#14910] (issue: {issue}14782[#14782])
-
-REST::
-* Remove detect_noop from REST spec {pull}16386[#16386]
-* Make text parsing less lenient. {pull}15679[#15679]
-* Throw exception when trying to write map with null keys {pull}15479[#15479] (issue: {issue}14346[#14346])
-* Fix OOM in AbstractXContentParser {pull}15350[#15350] (issue: {issue}15338[#15338])
-* XContentFactory.xContentType: allow for possible UTF-8 BOM for JSON XContentType {pull}14611[#14611] (issue: {issue}14442[#14442])
-* RestUtils.decodeQueryString ignores the URI fragment when parsing a query string {pull}13365[#13365] (issue: {issue}13320[#13320])
-
-Recovery::
-* Try to renew sync ID if `flush=true` on forceMerge {pull}17108[#17108] (issue: {issue}17019[#17019])
-* CancellableThreads should also treat ThreadInterruptedException as InterruptedException {pull}15318[#15318]
-
-Reindex API::
-* Properly register reindex status {pull}17125[#17125]
-* Make search failure cause rest failure {pull}16889[#16889] (issue: {issue}16037[#16037])
-
-Scripting::
-* Add permission to access sun.reflect.MethodAccessorImpl from Groovy scripts {pull}16540[#16540] (issue: {issue}16536[#16536])
-* Security permissions for Groovy closures {pull}16196[#16196] (issues: {issue}16194[#16194], {issue}248[#248])
-
-Search::
-* Do not apply minimum_should_match on auto generated boolean query if the coordination factor is disabled. {pull}16155[#16155]
-* Do not apply minimum-should-match on a boolean query if the coords are disabled {pull}16078[#16078] (issue: {issue}15858[#15858])
-* Fix blended terms take 2 {pull}15894[#15894] (issue: {issue}15860[#15860])
-* Fix NPE when a segment with an empty cache gets closed. {pull}15202[#15202] (issue: {issue}15043[#15043])
-* Fix the quotes in the explain message for a script score function without parameters {pull}11398[#11398]
-
-Settings::
-* TransportClient should use updated setting for initialization of modules and service {pull}16095[#16095]
-* ByteSizeValue.equals should normalize units {pull}13784[#13784]
-
-Snapshot/Restore::
-* Prevent closing index during snapshot restore {pull}16933[#16933] (issue: {issue}16321[#16321])
-* Add node version check to shard allocation during restore {pull}16520[#16520] (issue: {issue}16519[#16519])
-* Snapshot restore and index creates should keep index settings and cluster blocks in sync {pull}13931[#13931] (issue: {issue}13213[#13213])
-* Fix blob size in writeBlob() method {pull}13574[#13574] (issue: {issue}13434[#13434])
-
-Stats::
-* Fix recovery translog stats totals when recovering from store {pull}16493[#16493] (issue: {issue}15974[#15974])
-* Fix calculation of age of pending tasks {pull}15995[#15995] (issue: {issue}15988[#15988])
-* Add extra validation into `cluster/stats` {pull}14699[#14699] (issue: {issue}7390[#7390])
-* Omit current* stats for OldShardStats {pull}13801[#13801] (issue: {issue}13386[#13386])
-
-Task Manager::
-* Fix TaskId#isSet to return true when id is set and not other way around {pull}17307[#17307]
-* Don't wait for completion of list tasks tasks when wait_for_completion flag is set {pull}17231[#17231]
-
-Translog::
-* Never delete translog-N.tlog file when creation fails {pull}15788[#15788]
-* Close recovered translog readers if createWriter fails {pull}15762[#15762] (issue: {issue}15754[#15754])
-* Fail and close translog hard if writing to disk fails {pull}15420[#15420] (issue: {issue}15333[#15333])
-* Prevent writing to closed channel if translog is already closed {pull}15012[#15012] (issue: {issue}14866[#14866])
-* Don't delete temp recovered checkpoint file if it was renamed {pull}14872[#14872] (issue: {issue}14695[#14695])
-* Translog recovery can repeatedly fail if we run out of disk {pull}14695[#14695]
-* Pending operations in the translog prevent shard from being marked as inactive {pull}13759[#13759] (issue: {issue}13707[#13707])
-
-Tribe Node::
-* Passthrough environment and network settings to tribe client nodes {pull}16893[#16893]
-* Tribe node: pass path.conf to inner tribe clients {pull}16258[#16258] (issue: {issue}16253[#16253])
-* Fix tribe node to load config file for internal client nodes {pull}15300[#15300] (issues: {issue}13383[#13383], {issue}14573[#14573])
-
-
-
-[[regression-5.0.0-alpha1-2x]]
-[float]
-=== Regressions
-
-Analysis::
-* Add PathHierarchy type back to path_hierarchy tokenizer for backward compatibility with 1.x {pull}15785[#15785] (issue: {issue}15756[#15756])
-
-Internal::
-* Deduplicate concrete indices after indices resolution {pull}14316[#14316] (issues: {issue}11258[#11258], {issue}12058[#12058])
-
-Plugin Cloud Azure::
-* Filter cloud azure credentials {pull}14863[#14863] (issues: {issue}13779[#13779], {issue}14843[#14843])
-
-REST::
-* Don't return all indices immediately if count of expressions >1 and first expression is * {pull}17033[#17033] (issue: {issue}17027[#17027])
-
-
-
-[[upgrade-5.0.0-alpha1-2x]]
-[float]
-=== Upgrades
-
-Core::
-* Upgrade to Lucene 5.5.0 official release {pull}16742[#16742]
-* Upgrade to lucene 5.5.0-snapshot-850c6c2 {pull}16615[#16615]
-* Upgrade to lucene 5.5.0-snapshot-4de5f1d {pull}16400[#16400] (issues: {issue}16373[#16373], {issue}16399[#16399])
-* Update lucene to r1725675 {pull}16114[#16114]
-* Upgrade to lucene-5.5.0-snapshot-1721183. {pull}15575[#15575]
-* Upgrade Lucene to 5.4.0-snapshot-1715952 {pull}14951[#14951]
-* Upgrade Lucene to 5.4.0-snapshot-1714615 {pull}14784[#14784]
-* Upgrade to lucene-5.4.0-snapshot-1712973. {pull}14619[#14619]
-* update to lucene-5.4.x-snapshot-1711508 {pull}14398[#14398]
-* Upgrade to lucene-5.4-snapshot-1710880. {pull}14320[#14320]
-* Upgrade to lucene-5.4-snapshot-1708254. {pull}14074[#14074]
-* upgrade lucene to r1702265 {pull}13439[#13439]
-* Upgrade master to lucene 5.4-snapshot r1701068 {pull}13324[#13324]
-
-Geo::
-* Update to spatial4j 0.5 for correct Multi-Geometry {pull}14269[#14269] (issue: {issue}9904[#9904])
-
-Internal::
-* Update to Jackson 2.6.2 {pull}13344[#13344] (issues: {issue}10980[#10980], {issue}207[#207], {issue}213[#213])
-
-Plugin Cloud AWS::
-* Update AWS SDK version to 1.10.19 {pull}13655[#13655] (issue: {issue}13656[#13656])
-
-Plugin Cloud Azure::
-* Upgrade Azure Storage client to 4.0.0 {pull}16084[#16084] (issues: {issue}12567[#12567], {issue}15080[#15080], {issue}15976[#15976])
-* Update Azure Service Management API to 0.9.0 {pull}15232[#15232] (issue: {issue}15209[#15209])
-
-Plugin Discovery Azure::
-* Upgrade azure SDK to 0.9.3 {pull}17102[#17102] (issues: {issue}17042[#17042], {issue}557[#557])
-
-Plugin Discovery EC2::
-* Upgrade to aws 1.10.33 {pull}14672[#14672]
-
-Plugin Lang JS::
-* upgrade rhino for plugins/lang-javascript {pull}14466[#14466]
-
-Scripting::
-* Upgrade groovy dependency in lang-groovy module to version 2.4.6 {pull}16830[#16830] (issue: {issue}16527[#16527])
-
-
diff --git a/docs/reference/release-notes/5.0.0-alpha1.asciidoc b/docs/reference/release-notes/5.0.0-alpha1.asciidoc
deleted file mode 100644
index 1793713953..0000000000
--- a/docs/reference/release-notes/5.0.0-alpha1.asciidoc
+++ /dev/null
@@ -1,810 +0,0 @@
-[[release-notes-5.0.0-alpha1]]
-== 5.0.0-alpha1 Release Notes
-
-The changes listed below have been released for the first time in
-Elasticsearch 5.0.0-alpha1. Changes in this release which were first released
-in the 2.x series are listed in <<release-notes-5.0.0-alpha1-2x>>.
-
-Also see <<breaking-changes-5.0>>.
-
-IMPORTANT: This is an alpha release and is intended for _testing purposes only_. Indices created in this version will *not be compatible with Elasticsearch 5.0.0 GA*. Upgrading 5.0.0-alpha1 to any other version is not supported.
-
-[[breaking-5.0.0-alpha1]]
-[float]
-=== Breaking changes
-
-Aliases::
-* make get alias expand to open and closed indices by default {pull}15954[#15954] (issue: {issue}14982[#14982])
-* Remove deprecated indices.get_aliases {pull}13906[#13906]
-
-Allocation::
-* Remove DisableAllocationDecider {pull}13313[#13313]
-
-CAT API::
-* Add raw recovery progress to cat recovery API {pull}17064[#17064] (issue: {issue}17022[#17022])
-* Remove host from cat nodes API {pull}16656[#16656] (issues: {issue}12959[#12959], {issue}16575[#16575])
-* Using the accept header in the request instead of content-type in _cat API. {pull}14421[#14421] (issue: {issue}14195[#14195])
-
-CRUD::
-* Remove object notation for core types. {pull}15684[#15684] (issue: {issue}15388[#15388])
-
-Cache::
-* Remove deprecated query cache settings {pull}15592[#15592]
-
-Cluster::
-* Remove memory section {pull}17278[#17278] (issues: {issue}12049[#12049], {issue}16756[#16756])
-
-Core::
-* Bootstrap does not set system properties {pull}17088[#17088] (issues: {issue}16579[#16579], {issue}16791[#16791])
-* Add max number of processes check {pull}16919[#16919]
-* Add mlockall bootstrap check {pull}16909[#16909]
-* One log {pull}16703[#16703] (issue: {issue}16585[#16585])
-
-Engine::
-* Remove `index.compound_on_flush` setting and default to `true` {pull}15594[#15594] (issue: {issue}10778[#10778])
-
-Fielddata::
-* Remove "uninverted" and "binary" fielddata support for numeric and boolean fields. {pull}14082[#14082]
-
-Index APIs::
-* Remove `GET` option for /_forcemerge {pull}15223[#15223] (issue: {issue}15165[#15165])
-* Remove /_optimize REST API endpoint {pull}14226[#14226] (issue: {issue}13778[#13778])
-
-Internal::
-* Remove support for pre 2.0 indices {pull}13799[#13799]
-
-Mapping::
-* Change the field mapping index time boost into a query time boost. {pull}16900[#16900]
-* Deprecate string in favor of text/keyword. {pull}16877[#16877]
-* Term vector APIs should no longer update mappings {pull}16285[#16285]
-* Remove the `format` option of the `_source` field. {pull}15398[#15398]
-* Remove transform {pull}13657[#13657] (issue: {issue}12674[#12674])
-
-Parent/Child::
-* Removed `total` score mode in favour of `sum` score mode. {pull}17174[#17174] (issues: {issue}13470[#13470], {issue}17083[#17083])
-* Removed pre 2.x parent child implementation {pull}13376[#13376]
-
-Percolator::
-* Change the percolate api to not dynamically add fields to mapping {pull}16077[#16077] (issue: {issue}15751[#15751])
-
-Plugins::
-* Rename bin/plugin to bin/elasticsearch-plugin {pull}16454[#16454]
-* Remove multicast plugin {pull}16326[#16326] (issue: {issue}16310[#16310])
-* Plugins: Remove site plugins {pull}16038[#16038]
-
-Query DSL::
-* Function score query: remove deprecated support for boost_factor {pull}13510[#13510]
-* Remove support for deprecated queries. {pull}13418[#13418] (issue: {issue}13326[#13326])
-
-REST::
-* Limit the accepted length of the _id {pull}16036[#16036] (issue: {issue}16034[#16034])
-
-Search::
-* Remove deprecated reverse option from sorting {pull}17282[#17282] (issue: {issue}17047[#17047])
-* Remove some deprecations {pull}14331[#14331]
-* Remove search exists api {pull}13911[#13911] (issues: {issue}13682[#13682], {issue}13910[#13910])
-* Remove the scan and count search types. {pull}13310[#13310]
-
-Search Refactoring::
-* Remove "query" query and fix related parsing bugs {pull}14304[#14304] (issue: {issue}13326[#13326])
-
-Settings::
-* Remove ability to specify arbitrary node attributes with `node.` prefix {pull}17402[#17402] (issue: {issue}17280[#17280])
-* Enforce `discovery.zen.minimum_master_nodes` is set when bound to a public ip {pull}17288[#17288]
-* Prevent index level setting from being configured on a node level {pull}17144[#17144] (issue: {issue}16799[#16799])
-* Remove support for node.client setting {pull}16963[#16963] (issue: {issue}16565[#16565])
-* Remove es.max-open-files flag {pull}16757[#16757] (issues: {issue}16506[#16506], {issue}483[#483])
-* Enforce node level limits if node is started in production env {pull}16733[#16733] (issue: {issue}16727[#16727])
-* Make settings validation strict {pull}16365[#16365]
-* Remove the ability to fsync on every operation and only schedule fsync task if really needed {pull}16257[#16257] (issue: {issue}16152[#16152])
-* Script settings {pull}16197[#16197]
-* Remove index.flush_on_close entirely {pull}15977[#15977]
-* Restore chunksize of 512kb on recovery and remove configurability {pull}15235[#15235] (issue: {issue}15161[#15161])
-* Remove ancient deprecated and alternative recovery settings {pull}15234[#15234]
-* Fix ping timeout settings inconsistencies {pull}13701[#13701] (issue: {issue}6579[#6579])
-
-Similarities::
-* Renames `default` similarity into `classic` {pull}15446[#15446] (issue: {issue}15102[#15102])
-
-Snapshot/Restore::
-* Fail closing or deleting indices during a full snapshot {pull}17021[#17021] (issue: {issue}16321[#16321])
-
-Stats::
-* Modify load average format {pull}15932[#15932] (issue: {issue}15907[#15907])
-* Reintroduce five-minute and fifteen-minute load averages on Linux {pull}15907[#15907] (issues: {issue}12049[#12049], {issue}14741[#14741])
-* Add system CPU percent to OS stats {pull}14741[#14741]
-
-Term Vectors::
-* Remove DFS support from TermVector API {pull}16452[#16452]
-
-Translog::
-* Drop support for simple translog and hard-wire buffer to 8kb {pull}15574[#15574]
-* Simplify translog-based flush settings {pull}15573[#15573]
-
-Warmers::
-* Remove query warmers and the warmer API. {pull}15614[#15614] (issue: {issue}15607[#15607])
-
-[[breaking-java-5.0.0-alpha1]]
-[float]
-=== Breaking Java changes
-
-Aggregations::
-* getKeyAsString and key_as_string should be the same for terms aggregation on boolean field {pull}15393[#15393]
-
-Allocation::
-* Simplify shard balancer interface {pull}17028[#17028] (issue: {issue}8954[#8954])
-
-Analysis::
-* Simplify Analysis registration and configuration {pull}14355[#14355]
-
-Cache::
-* Refactor IndicesRequestCache to make it testable. {pull}16610[#16610]
-* Fold IndexCacheModule into IndexModule {pull}14293[#14293]
-
-Core::
-* Remove es.useLinkedTransferQueue {pull}16786[#16786]
-
-Discovery::
-* Include pings from client nodes in master election {pull}17329[#17329] (issue: {issue}17325[#17325])
-
-Internal::
-* Remove duplicate getters from DiscoveryNode and DiscoveryNodes {pull}17410[#17410] (issue: {issue}16963[#16963])
-* Cli: Switch to jopt-simple {pull}17024[#17024] (issue: {issue}11564[#11564])
-* Replace ContextAndHeaders with a ThreadPool based ThreadLocal implementation {pull}15776[#15776]
-* Remove NodeBuilder {pull}15354[#15354]
-* Fix IndexSearcherWrapper interface to not depend on the EngineConfig {pull}14654[#14654]
-* Cleanup query parsing and remove IndexQueryParserService {pull}14452[#14452]
-* Remove circular dependency between IndicesService and IndicesStore {pull}14285[#14285]
-* Remove guice injection from IndexStore and friends {pull}14279[#14279]
-* Replace IndicesLifecycle with a per-index IndexEventListener {pull}14217[#14217] (issue: {issue}13259[#13259])
-* Simplify similarity module and friends {pull}13942[#13942]
-* Refactor SearchRequest to be parsed on the coordinating node {pull}13859[#13859]
-
-Java API::
-* Remove the count api {pull}14166[#14166] (issue: {issue}13928[#13928])
-* IdsQueryBuilder to accept only non null ids and types {pull}13937[#13937]
-
-Mapping::
-* [Mapping] Several MappingService cleanups {pull}16133[#16133] (issue: {issue}15924[#15924])
-
-Network::
-* Remove ability to disable Netty gathering writes {pull}16774[#16774] (issue: {issue}7811[#7811])
-
-Parent/Child::
-* Cleanup ParentFieldMapper {pull}16045[#16045]
-* Several other parent/child cleanups {pull}13470[#13470]
-
-Plugins::
-* Enforce isolated mode for all plugins {pull}17276[#17276]
-* Change the inner structure of the plugins zip {pull}16453[#16453]
-* Don't use guice for QueryParsers {pull}15761[#15761]
-* Remove guice from the index level {pull}14518[#14518]
-* Remove shard-level injector {pull}13881[#13881]
-
-Query DSL::
-* Remove the MissingQueryBuilder which was deprecated in 2.2.0. {pull}15364[#15364] (issue: {issue}14112[#14112])
-* Remove NotQueryBuilder {pull}14204[#14204] (issue: {issue}13761[#13761])
-
-Search::
-* Query refactoring: split parse phase into fromXContent and toQuery for all queries {pull}13788[#13788] (issue: {issue}10217[#10217])
-
-Search Refactoring::
-* Refactored inner hits parsing and introduced InnerHitBuilder {pull}17291[#17291]
-* Remove deprecated parameter from field sort builder. {pull}16573[#16573] (issue: {issue}16127[#16127])
-* Remove support for query_binary and filter_binary {pull}14433[#14433] (issue: {issue}14308[#14308])
-* Validate query api: move query parsing to the coordinating node {pull}14384[#14384]
-
-Settings::
-* Move remaining settings in NettyHttpServerTransport to the new infra {pull}16531[#16531]
-* Replace IndexSettings annotation with a full-fledged class {pull}14251[#14251]
-
-Store::
-* Standardize state format type for global and index level metadata {pull}17123[#17123]
-
-Suggesters::
-* Remove suggest threadpool {pull}17304[#17304] (issue: {issue}17198[#17198])
-* Remove suggest transport action {pull}17198[#17198] (issue: {issue}10217[#10217])
-
-[[deprecation-5.0.0-alpha1]]
-[float]
-=== Deprecations
-
-Plugin Mapper Attachment::
-* Deprecate mapper-attachments plugin {pull}16948[#16948] (issue: {issue}16910[#16910])
-
-Search::
-* Deprecate fuzzy query {pull}16211[#16211] (issues: {issue}15760[#15760], {issue}16121[#16121])
-
-
-
-[[feature-5.0.0-alpha1]]
-[float]
-=== New features
-
-Allocation::
-* Add API to explain why a shard is or isn't assigned {pull}17305[#17305] (issue: {issue}14593[#14593])
-
-Discovery::
-* Add two phased commit to Cluster State publishing {pull}13062[#13062]
-
-Ingest::
-* Merge feature/ingest branch into master branch {pull}16049[#16049] (issue: {issue}14049[#14049])
-
-Mapping::
-* Add a text field. {pull}16637[#16637]
-* Add a new `keyword` field. {pull}16589[#16589]
-
-Percolator::
-* index the query terms from the percolator query {pull}13646[#13646] (issue: {issue}12664[#12664])
-
-Plugin Ingest Attachment::
-* Ingest: Add attachment processor {pull}16490[#16490] (issue: {issue}16303[#16303])
-
-Plugin Mapper Attachment::
-* Migrate mapper attachments plugin to main repository {pull}14605[#14605]
-
-Plugin Repository HDFS::
-* HDFS Snapshot/Restore plugin {pull}15192[#15192] (issue: {issue}15191[#15191])
-
-Query DSL::
-* Adds a rewrite phase to queries on the shard level {pull}16870[#16870] (issue: {issue}9526[#9526])
-
-Reindex API::
-* Merge reindex to master {pull}16861[#16861]
-
-Scripting::
-* Exceptions and Infinite Loop Checking {pull}15936[#15936]
-* Added a new scripting language (PlanA) {pull}15136[#15136] (issue: {issue}13084[#13084])
-
-Search::
-* Add `search_after` parameter in the SearchAPI {pull}16125[#16125] (issue: {issue}8192[#8192])
-
-Settings::
-* Add infrastructure to transactionally apply and reset dynamic settings {pull}15278[#15278]
-
-Stats::
-* API for listing index file sizes {pull}16661[#16661] (issue: {issue}16131[#16131])
-
-Suggesters::
-* Add document-oriented completion suggester {pull}14410[#14410] (issue: {issue}10746[#10746])
-
-Task Manager::
-* Add task cancellation mechanism {pull}16320[#16320]
-* Make the Task object available to the action caller {pull}16033[#16033]
-* Task Management: Add framework for registering and communicating with tasks {pull}15347[#15347] (issue: {issue}15117[#15117])
-
-
-
-[[enhancement-5.0.0-alpha1]]
-[float]
-=== Enhancements
-
-Aggregations::
-* Add tests and documentation for using `time_zone` in date range aggregation {pull}16955[#16955] (issue: {issue}10130[#10130])
-* Fixes serialisation of Ranges {pull}16674[#16674]
-
-Allocation::
-* Write shard state metadata as soon as shard is created / initializing {pull}16625[#16625] (issue: {issue}14739[#14739])
-* Reuse existing allocation id for primary shard allocation {pull}16530[#16530] (issue: {issue}14739[#14739])
-* Remove version in ShardRouting (now obsolete) {pull}16243[#16243] (issue: {issue}14739[#14739])
-* Prefer nodes that previously held primary shard for primary shard allocation {pull}16096[#16096] (issue: {issue}14739[#14739])
-* Extend reroute with an option to force assign stale primary shard copies {pull}15708[#15708] (issue: {issue}14739[#14739])
-* Allocate primary shards based on allocation IDs {pull}15281[#15281] (issue: {issue}14739[#14739])
-* Persist currently started allocation IDs to index metadata {pull}14964[#14964] (issue: {issue}14739[#14739])
-* Use ObjectParser to parse AllocationID {pull}14962[#14962] (issue: {issue}14831[#14831])
-* Persist allocation ID with shard state metadata on nodes {pull}14831[#14831] (issue: {issue}14739[#14739])
-
-Analysis::
-* Improve error message if resource files have illegal encoding {pull}17237[#17237] (issue: {issue}17212[#17212])
-
-CAT API::
-* Expose http address in cat/nodes {pull}16770[#16770]
-* [cat/recovery] Make recovery time a TimeValue() {pull}16743[#16743] (issue: {issue}9209[#9209])
-* CAT API: remove space at the end of a line {pull}15250[#15250] (issue: {issue}9464[#9464])
-
-CRUD::
-* CRUD: Allow to get and set ttl as a time value/string {pull}15047[#15047]
-
-Cache::
-* Enable the indices request cache by default {pull}17162[#17162] (issues: {issue}16870[#16870], {issue}17134[#17134])
-
-Cluster::
-* Cluster Health should run on applied states, even if waitFor=0 {pull}17440[#17440]
-* Resolve index names to Index instances early {pull}17048[#17048]
-* Remove DiscoveryNode#shouldConnectTo method {pull}16898[#16898] (issue: {issue}16815[#16815])
-* Fail demoted primary shards and retry request {pull}16415[#16415] (issue: {issue}14252[#14252])
-* Illegal shard failure requests {pull}16275[#16275]
-* Shard failure requests for non-existent shards {pull}16089[#16089] (issue: {issue}14252[#14252])
-* Add handling of channel failures when starting a shard {pull}16041[#16041] (issue: {issue}15895[#15895])
-* Wait for new master when failing shard {pull}15748[#15748] (issue: {issue}14252[#14252])
-* Master should wait on cluster state publication when failing a shard {pull}15468[#15468] (issue: {issue}14252[#14252])
-* Split cluster state update tasks into roles {pull}14899[#14899] (issue: {issue}13627[#13627])
-* Add timeout mechanism for sending shard failures {pull}14707[#14707] (issue: {issue}14252[#14252])
-* Add listener mechanism for failures to send shard failed {pull}14295[#14295] (issue: {issue}14252[#14252])
-
-Core::
-* Remove PROTOTYPE from BulkItemResponse.Failure {pull}17433[#17433] (issue: {issue}17086[#17086])
-* Throw an exception if Writeable.Reader reads null {pull}17332[#17332]
-* Remove PROTOTYPE from RescorerBuilders {pull}17330[#17330]
-* Port Primary Terms to master {pull}17044[#17044] (issues: {issue}14062[#14062], {issue}14651[#14651], {issue}17038[#17038])
-* Use index UUID to lookup indices on IndicesService {pull}17001[#17001]
-* Add -XX:+AlwaysPreTouch JVM flag {pull}16937[#16937]
-* Add max size virtual memory check {pull}16935[#16935]
-* Use and test relative time in TransportBulkAction {pull}16916[#16916]
-* Bump Elasticsearch version to 5.0.0-SNAPSHOT {pull}16862[#16862]
-* Assert that we can write in all data-path on startup {pull}16745[#16745]
-* Add G1GC check on startup {pull}16737[#16737] (issue: {issue}10740[#10740])
-* Shards with heavy indexing should get more of the indexing buffer {pull}14121[#14121]
-* Remove and ban ImmutableMap {pull}13939[#13939] (issue: {issue}13224[#13224])
-* Finish banning ImmutableSet {pull}13820[#13820] (issue: {issue}13224[#13224])
-* Removes and bans ImmutableSet {pull}13754[#13754] (issue: {issue}13224[#13224])
-* Remove and ban ImmutableMap#entrySet {pull}13724[#13724]
-* Forbid ForwardingSet {pull}13720[#13720] (issue: {issue}13224[#13224])
-
-Discovery::
-* Add a dedicated queue for incoming ClusterStates {pull}13303[#13303] (issue: {issue}13062[#13062])
-
-Engine::
-* Remove writeLockTimeout from InternalEngine {pull}16930[#16930]
-* Don't guard IndexShard#refresh calls by a check to isRefreshNeeded {pull}16118[#16118]
-* Never call a listener under lock in InternalEngine {pull}15786[#15786]
-* Use System.nanoTime() to initialize Engine.lastWriteNanos {pull}14321[#14321]
-* Flush big merges automatically if shard is inactive {pull}14275[#14275]
-* Remove Engine.Create {pull}13955[#13955]
-* Remove the disabled autogenerated id optimization from InternalEngine {pull}13857[#13857]
-
-Exceptions::
-* Fix typos in exception/assert/log messages in core module. {pull}16649[#16649]
-* Add field names to several mapping errors {pull}16508[#16508] (issue: {issue}16378[#16378])
-* Add serialization support for more important IOExceptions {pull}15766[#15766]
-* Adds exception objects to log messages. {pull}14827[#14827] (issue: {issue}10021[#10021])
-* Add stack traces to logged exceptions where missing {pull}13825[#13825] (issue: {issue}10021[#10021])
-* Remove reflection hacks from ElasticsearchException {pull}13796[#13796]
-* Rename QueryParsingException to a more generic ParsingException {pull}13631[#13631]
-* Add *Exception(Throwable cause) constructors/ call where appropriate {pull}13544[#13544] (issue: {issue}10021[#10021])
-
-Geo::
-* Fix a potential parsing problem in GeoDistanceSortParser {pull}17111[#17111]
-* Geo: Add validation of shapes to ShapeBuilders {pull}15551[#15551] (issue: {issue}14416[#14416])
-* Make remaining ShapeBuilders implement Writeable {pull}15010[#15010] (issue: {issue}14416[#14416])
-* Geo: Remove internal `translated` flag from LineStringBuilder {pull}14969[#14969]
-* Make PointBuilder, CircleBuilder & EnvelopeBuilder implement Writable {pull}14933[#14933] (issue: {issue}14416[#14416])
-* Merging BaseLineString and BasePolygonBuilder with subclass {pull}14887[#14887] (issue: {issue}14482[#14482])
-* Moving static factory methods to ShapeBuilders {pull}14529[#14529]
-* Remove InternalLineStringBuilder and InternalPolygonBuilder {pull}14482[#14482] (issue: {issue}14416[#14416])
-
-Highlighting::
-* Switch Highlighting to ObjectParser {pull}17363[#17363]
-* Use HighlightBuilder in SearchSourceBuilder {pull}15376[#15376] (issue: {issue}15044[#15044])
-* Joint parsing of common global Highlighter and subfield parameters {pull}15368[#15368] (issue: {issue}15285[#15285])
-* Enable HighlightBuilder to create SearchContextHighlight {pull}15324[#15324]
-* Add fromXContent method to HighlightBuilder {pull}15157[#15157]
-
-Ingest::
-* add automatic type conversion support to ConvertProcessor {pull}17263[#17263] (issue: {issue}17139[#17139])
-* Give the foreach processor access to the rest of the document {pull}17172[#17172] (issue: {issue}17147[#17147])
-* Added ingest statistics to node stats API {pull}16915[#16915]
-* Add `ingest_took` to bulk response {pull}16876[#16876]
-* Add ingest info to node info API, which contains a list of available processors {pull}16865[#16865]
-* Use diffs for ingest metadata in cluster state {pull}16847[#16847]
-* hide null-valued metadata fields from WriteableIngestDocument#toXContent {pull}16557[#16557]
-* Ingest: use bulk thread pool for bulk request processing (was index before) {pull}16539[#16539] (issue: {issue}16503[#16503])
-* Add foreach processor {pull}16432[#16432]
-* revert PipelineFactoryError handling with throwing ElasticsearchParseException in ingest pipeline creation {pull}16355[#16355]
-* Add processor tags to on_failure metadata in ingest pipeline {pull}16324[#16324] (issue: {issue}16202[#16202])
-* catch processor/pipeline factory exceptions and return structured error responses {pull}16276[#16276] (issue: {issue}16010[#16010])
-* Ingest: move get/put/delete pipeline methods to ClusterAdminClient {pull}16242[#16242]
-* Geoip processor: remove redundant latitude and longitude fields and make location an object with lat and lon subfields {pull}16173[#16173]
-
-Internal::
-* Remove PROTOTYPE from MLT.Item {pull}17481[#17481] (issue: {issue}17085[#17085])
-* Remove PROTOTYPE from VersionType {pull}17480[#17480] (issue: {issue}17085[#17085])
-* Remove PROTOTYPEs from highlighting {pull}17466[#17466] (issue: {issue}17085[#17085])
-* Remove PROTOTYPEs from ingest {pull}17434[#17434] (issue: {issue}17085[#17085])
-* Start to rework query registration {pull}17424[#17424]
-* Factor out slow logs into Search and IndexingOperationListeners {pull}17398[#17398]
-* Remove PROTOTYPE from Suggesters {pull}17370[#17370]
-* Remove PROTOTYPE from SortBuilders {pull}17337[#17337] (issue: {issue}17085[#17085])
-* Remove PROTOTYPE from ShapeBuilders {pull}17336[#17336] (issue: {issue}17085[#17085])
-* Replace FieldStatsProvider with a method on MappedFieldType. {pull}17334[#17334]
-* Stop using PROTOTYPE in NamedWriteableRegistry {pull}17284[#17284] (issue: {issue}17085[#17085])
-* Support scheduled commands in current context {pull}17077[#17077]
-* Thread limits {pull}17003[#17003]
-* Remove leniency from segments info integrity checks {pull}16985[#16985] (issue: {issue}16973[#16973])
-* Rename SearchServiceTransportAction to SearchTransportService {pull}16880[#16880]
-* Decouple the TransportService and ClusterService {pull}16872[#16872] (issue: {issue}16788[#16788])
-* Refactor bootstrap checks {pull}16844[#16844] (issues: {issue}16733[#16733], {issue}16835[#16835])
-* Add LifecycleRunnable {pull}16752[#16752]
-* Hot inlined methods in your area {pull}16725[#16725]
-* Move IndicesQueryCache and IndicesRequestCache into IndicesService {pull}16603[#16603]
-* Forbid use of java.security.MessageDigest#clone() {pull}16543[#16543] (issue: {issue}16479[#16479])
-* Make IndicesWarmer a private class of IndexService {pull}16470[#16470]
-* Simplify IndicesFieldDataCache and detach from guice {pull}16469[#16469]
-* Uppercase ells ('L') in long literals {pull}16329[#16329] (issue: {issue}16279[#16279])
-* ShardId equality and hash code inconsistency {pull}16319[#16319] (issue: {issue}16217[#16217])
-* Ensure all resources are closed on Node#close() {pull}16316[#16316] (issue: {issue}13685[#13685])
-* Make index uuid available in Index, ShardRouting & ShardId {pull}16217[#16217]
-* Move RefreshTask into IndexService and use a single task per index {pull}15933[#15933]
-* Make IndexingMemoryController private to IndicesService {pull}15877[#15877]
-* Cleanup IndexingOperationListeners infrastructure {pull}15875[#15875]
-* Remove and forbid use of j.u.c.ThreadLocalRandom {pull}15862[#15862] (issue: {issue}15294[#15294])
-* Fix IntelliJ query builder type inference issues {pull}15429[#15429]
-* Remove and forbid use of Collections#shuffle(List) and Random#<init>() {pull}15299[#15299] (issue: {issue}15287[#15287])
-* Remove and forbid use of the type-unsafe empty Collections fields {pull}15187[#15187]
-* Move IndicesService.canDeleteShardContent to use IndexSettings {pull}15150[#15150] (issue: {issue}15059[#15059])
-* Simplify MonitorService construction and detach from guice {pull}15035[#15035]
-* Use Supplier for StreamInput#readOptionalStreamable {pull}14806[#14806]
-* Add variable-length long encoding {pull}14780[#14780]
-* Extend usage of IndexSetting class {pull}14731[#14731] (issue: {issue}14251[#14251])
-* Fold SimilarityModule into IndexModule {pull}14284[#14284]
-* Move to lucene BoostQuery {pull}14264[#14264]
-* Use built-in method for computing hash code of longs {pull}14213[#14213]
-* Refactor ShardFailure listener infrastructure {pull}14206[#14206]
-* Add methods for variable-length encoding integral arrays {pull}14087[#14087]
-* Fold IndexAliasesService into IndexService {pull}14044[#14044]
-* Remove unneeded Module abstractions {pull}13944[#13944]
-* Query refactoring: simplify IndexQueryParserService parse methods {pull}13938[#13938] (issue: {issue}13859[#13859])
-* Remove and forbid use of com.google.common.collect.Iterators {pull}13916[#13916] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.collect.ImmutableCollection {pull}13909[#13909] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.io.Resources {pull}13908[#13908] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.hash.* {pull}13907[#13907] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.net.InetAddresses {pull}13905[#13905] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.collect.EvictingQueue {pull}13903[#13903] (issue: {issue}13224[#13224])
-* Replace Guava cache with simple concurrent LRU cache {pull}13879[#13879]
-* Remove ClusterService and IndexSettingsService dependency from IndexShard {pull}13853[#13853]
-* Start making RecoverySourceHandler unit-testable {pull}13840[#13840]
-* Remove IndexService dependency from IndexShard {pull}13797[#13797]
-* Remove ES internal deletion policies in favour of Lucene's implementations {pull}13794[#13794]
-* Move ShardTermVectorService to be on indices level as TermVectorService {pull}13786[#13786]
-* Move ShardPercolateService creation into IndexShard {pull}13777[#13777]
-* Remove `ExpressionScriptCompilationException` and `ExpressionScriptExecutionException` {pull}13742[#13742]
-* Reduced the number of ClusterStateUpdateTask variants {pull}13735[#13735]
-* Add a BaseParser helper for stream parsing {pull}13615[#13615]
-* Remove and forbid use of com.google.common.primitives.Ints {pull}13596[#13596] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.math.LongMath {pull}13575[#13575] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.base.Joiner {pull}13572[#13572] (issue: {issue}13224[#13224])
-* Replace and ban next batch of Guava classes {pull}13562[#13562] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.collect.Iterables {pull}13559[#13559] (issue: {issue}13224[#13224])
-* Replace LoadingCache usage with a simple ConcurrentHashMap {pull}13552[#13552] (issue: {issue}13224[#13224])
-* Use Supplier instead of Reflection {pull}13545[#13545]
-* Remove and forbid use of com.google.common.base.Preconditions {pull}13540[#13540] (issue: {issue}13224[#13224])
-* Remove and forbid use of guava Function, Charsets, Collections2 {pull}13533[#13533] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.collect.ImmutableSortedMap {pull}13525[#13525] (issue: {issue}13224[#13224])
-* Remove and forbid use of several com.google.common.util.concurrent classes {pull}13524[#13524] (issue: {issue}13224[#13224])
-* Cleanup SearchRequest & SearchRequestBuilder {pull}13518[#13518]
-* Remove and forbid use of com.google.common.collect.Queues {pull}13498[#13498] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.base.Preconditions#checkNotNull {pull}13493[#13493] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.collect.Sets {pull}13463[#13463] (issue: {issue}13224[#13224])
-* Remove and forbid use of com.google.common.collect.Maps {pull}13438[#13438] (issue: {issue}13224[#13224])
-* Remove use of underscore as an identifier {pull}13353[#13353]
-* Remove and forbid the use of com.google.common.base.Predicate(s)? {pull}13349[#13349] (issues: {issue}13224[#13224], {issue}13314[#13314])
-* Remove com.google.common.io {pull}13302[#13302] (issue: {issue}13224[#13224])
-
-Java API::
-* Remove copy constructors from request classes and TransportMessage type {pull}16640[#16640] (issue: {issue}15776[#15776])
-
-Mapping::
-* Add a soft limit on the mapping depth. {pull}17400[#17400]
-* Disable fielddata on text fields by default. {pull}17386[#17386]
-* Add limit to total number of fields in mapping {pull}17357[#17357]
-* Make `parseMultiField` part of `parseField`. {pull}17313[#17313]
-* Automatically add a sub keyword field to string dynamic mappings. {pull}17188[#17188]
-* Remove friction from the mapping changes in 5.0. {pull}16991[#16991]
-* Rework norms parameters for 5.0. {pull}16987[#16987]
-* Moved dynamic field handling in doc parsing to end of parsing {pull}16798[#16798]
-* Remove the MapperBuilders utility class. {pull}16609[#16609]
-* Make the `index` property a boolean. {pull}16161[#16161]
-* Remove the ability to enable doc values with the `fielddata.format` setting. {pull}16147[#16147]
-* Be stricter about parsing boolean values in mappings. {pull}16146[#16146]
-* Fix default doc values to be enabled when a field is not indexed. {pull}16141[#16141]
-* Dynamically map floating-point numbers as floats instead of doubles. {pull}15319[#15319] (issue: {issue}13851[#13851])
-* Simplify MetaDataMappingService. {pull}15217[#15217]
-* Remove MergeMappingException. {pull}15177[#15177]
-
-Packaging::
-* Added RPM metadata {pull}17477[#17477]
-* Elasticsearch ownership for data, logs, and configs {pull}17197[#17197] (issue: {issue}12688[#12688])
-* Fail early on JDK with compiler bug {pull}16418[#16418] (issues: {issue}16097[#16097], {issue}16362[#16362])
-* Make security non-optional {pull}16176[#16176]
-* Remove RuntimePermission("accessDeclaredMembers") {pull}15378[#15378]
-* Remove Guava as a dependency {pull}14055[#14055] (issue: {issue}13224[#13224])
-* Remove Guava as a dependency {pull}14054[#14054] (issue: {issue}13224[#13224])
-
-Percolator::
-* Add scoring support to the percolator query {pull}17385[#17385] (issue: {issue}13827[#13827])
-* Add query extract support for the blended term query and the common terms query {pull}17347[#17347]
-* Add support for several span queries in ExtractQueryTermsService {pull}17323[#17323]
-* Add support for TermsQuery in ExtractQueryTermsService {pull}17316[#17316]
-* Replace percolate APIs with a percolator query {pull}16349[#16349] (issues: {issue}10741[#10741], {issue}11264[#11264], {issue}13176[#13176], {issue}13978[#13978], {issue}4317[#4317], {issue}7297[#7297])
-
-Plugin Analysis Kuromoji::
-* Add nbest options and NumberFilter {pull}17173[#17173]
-
-Plugin Discovery EC2::
-* Add support for proxy authentication for s3 and ec2 {pull}15293[#15293] (issue: {issue}15268[#15268])
-
-Plugin Ingest Attachment::
-* Minor attachment processor improvements {pull}16574[#16574]
-
-Plugin Lang Painless::
-* Painless Clean Up {pull}17428[#17428]
-* Make Painless a Module {pull}16755[#16755]
-* Minor Clean up {pull}16457[#16457]
-* Remove Extra String Concat Token {pull}16382[#16382]
-
-Plugin Mapper Attachment::
-* minor attachments cleanups: IDE test support and EPUB format {pull}14626[#14626]
-
-Plugin Repository Azure::
-* Support global `repositories.azure.` settings {pull}15141[#15141] (issue: {issue}13776[#13776])
-* Add timeout settings (default to 5 minutes) {pull}15080[#15080] (issue: {issue}14277[#14277])
-* Remove AbstractLegacyBlobContainer {pull}14650[#14650] (issue: {issue}13434[#13434])
-
-Plugin Repository HDFS::
-* merge current hdfs improvements to master {pull}15588[#15588]
-
-Plugin Repository S3::
-* Check that S3 setting `buffer_size` is always lower than `chunk_size` {pull}17274[#17274] (issue: {issue}17244[#17244])
-
-Plugins::
-* PluginManager: Add xpack as official plugin {pull}17227[#17227]
-* CliTool: Cleanup and document Terminal {pull}16443[#16443]
-* Plugin cli: Improve maven coordinates detection {pull}16384[#16384] (issue: {issue}16376[#16376])
-* Enforce plugin zip does not contain zip entries outside of the plugin dir {pull}16361[#16361]
-* CliTool: Allow unexpected exceptions to propagate {pull}16359[#16359]
-* Reduce complexity of plugin cli {pull}16336[#16336]
-* Remove Plugin.onIndexService. {pull}15029[#15029] (issue: {issue}14896[#14896])
-* Open up QueryCache and SearcherWrapper extension points {pull}14303[#14303]
-
-Query DSL::
-* An `exists` query on an object should query a single term. {pull}17186[#17186] (issue: {issue}17131[#17131])
-* Function Score Query: make parsing stricter {pull}16617[#16617] (issue: {issue}16583[#16583])
-* Parsers should throw exception on unknown objects {pull}14255[#14255] (issue: {issue}10974[#10974])
-* UNICODE_CHARACTER_CLASS fix {pull}11598[#11598] (issue: {issue}10146[#10146])
-
-Query Refactoring::
-* Add infrastructure to rewrite query builders {pull}16599[#16599]
-* Switch geo validation to enum {pull}13672[#13672] (issue: {issue}13608[#13608])
-
-REST::
-* More robust handling of CORS HTTP Access Control {pull}16092[#16092]
-* Add option to exclude based on paths in XContent {pull}16017[#16017]
-
-Recovery::
-* Recover broken IndexMetaData as closed {pull}17187[#17187]
-* Relocation source should be marked as relocating before starting recovery to primary relocation target {pull}16500[#16500]
-* Operation counter for IndexShard {pull}15956[#15956] (issue: {issue}15900[#15900])
-* Primary relocation handoff {pull}15900[#15900] (issue: {issue}15532[#15532])
-* Remove recovery threadpools and throttle outgoing recoveries on the master {pull}15372[#15372]
-* Refactor StoreRecoveryService to be a simple package private util class {pull}13766[#13766]
-
-Reindex API::
-* Make reindex throttling dynamic {pull}17262[#17262]
-* Throttling support for reindex {pull}17039[#17039]
-* Add ingest pipeline support to reindex {pull}16932[#16932]
-
-Scripting::
-* Skip compilation of hidden files in the script service {pull}16286[#16286] (issue: {issue}15269[#15269])
-* Rename Plan A to Painless {pull}16245[#16245]
-* Add plumbing for script compile-time parameters {pull}15464[#15464]
-* Factor mustache -> modules/lang-mustache {pull}15328[#15328]
-
-Search::
-* Add a soft limit on the number of shards that can be queried in a single search request. {pull}17396[#17396]
-* Type filters should not have a performance impact when there is a single type. {pull}17350[#17350]
-* Store _all payloads on 1 byte instead of 4. {pull}16899[#16899]
-* Refuse to load fields from _source when using the `fields` option and support wildcards. {pull}15017[#15017] (issues: {issue}10783[#10783], {issue}14489[#14489])
-* Add response into ClearScrollResponse {pull}13835[#13835] (issue: {issue}13817[#13817])
-* Shuffle shards for _only_nodes + support multiple specifications like cluster API {pull}12575[#12575] (issues: {issue}12546[#12546], {issue}12700[#12700])
-
-Search Refactoring::
-* Remove RescoreParseElement {pull}17441[#17441]
-* Remove HighlighterParseElement {pull}17303[#17303]
-* Move top level parsing of sort element to SortBuilder {pull}17248[#17248]
-* Switch to using refactored SortBuilder instead of using BytesReference in serialization {pull}17205[#17205] (issues: {issue}17146[#17146], {issue}17257[#17257])
-* Add build() method to SortBuilder implementations {pull}17146[#17146] (issue: {issue}10217[#10217])
-* Refactoring of Suggestions {pull}17096[#17096] (issue: {issue}10217[#10217])
-* Move sort `order` field up into SortBuilder {pull}17035[#17035]
-* Moves SortParser:parse(...) to only require QueryShardContext {pull}16999[#16999] (issue: {issue}15178[#15178])
-* Change internal representation of suggesters {pull}16873[#16873]
-* Make GeoDistanceSortBuilder serializable, 2nd try {pull}16572[#16572] (issues: {issue}15178[#15178], {issue}16151[#16151])
-* Move missing() from SortBuilder interface to class {pull}16225[#16225] (issues: {issue}15178[#15178], {issue}16151[#16151])
-* Remove deprecated parameters from ScriptSortBuilder {pull}16153[#16153] (issue: {issue}15178[#15178])
-* Refactor GeoSortBuilder {pull}16151[#16151] (issue: {issue}15178[#15178])
-* Refactor FieldSortBuilder {pull}16127[#16127] (issue: {issue}15178[#15178])
-* Make sort order enum writable. {pull}16124[#16124] (issue: {issue}15178[#15178])
-* Make DistanceUnit writable. {pull}16122[#16122] (issue: {issue}15178[#15178])
-* RescoreBuilder: Add parsing and creating of RescoreSearchContext {pull}16014[#16014] (issue: {issue}15559[#15559])
-* Make RescoreBuilder and nested QueryRescorer Writable {pull}15953[#15953] (issue: {issue}15559[#15559])
-* Explain api: move query parsing to the coordinating node {pull}14270[#14270]
-* Switch query parsers to use ParseField {pull}14249[#14249] (issue: {issue}8964[#8964])
-* Refactoring of Aggregations {pull}14136[#14136]
-
-Settings::
-* Add guard against null-valued settings {pull}17310[#17310] (issue: {issue}17292[#17292])
-* Useful error message for null property placeholder {pull}17293[#17293] (issue: {issue}17292[#17292])
-* Archive cluster level settings if unknown or broken {pull}17246[#17246]
-* Improve error message if setting is not found {pull}17230[#17230]
-* Improve upgrade experience of node level index settings {pull}17223[#17223] (issue: {issue}17187[#17187])
-* Settings with complex matchers should not overlap {pull}16754[#16754]
-* Moves GCE settings to the new infra {pull}16722[#16722] (issue: {issue}16720[#16720])
-* Add filtering support within Setting class {pull}16629[#16629] (issue: {issue}16598[#16598])
-* Migrate AWS settings to new settings infrastructure {pull}16602[#16602] (issue: {issue}16293[#16293])
-* Remove `gateway.initial_meta` and always rely on min master nodes {pull}16446[#16446]
-* Rewrite SettingsFilter to be immutable {pull}16425[#16425]
-* Simplify azure settings {pull}16363[#16363]
-* Convert PageCacheRecycler settings {pull}16341[#16341]
-* Monitor settings {pull}16313[#16313]
-* Cut over tribe node settings to new settings infra {pull}16311[#16311]
-* Convert multicast plugin settings to the new infra {pull}16295[#16295]
-* Convert `request.headers.*` to the new settings infra {pull}16292[#16292]
-* Migrate Azure settings to new settings infrastructure {pull}16291[#16291]
-* Validate logger settings and allow them to be reset via API {pull}16289[#16289]
-* Switch NodeEnvironment's settings to new settings {pull}16273[#16273]
-* Simplify AutoCreateIndex and add more tests {pull}16270[#16270]
-* Convert several pending settings {pull}16269[#16269]
-* Migrate query caching settings to the new settings infra. {pull}16267[#16267]
-* Convert `action.auto_create_index` and `action.master.force_local` to the new settings infra {pull}16263[#16263]
-* Convert `cluster.routing.allocation.type` and `processors` to the new settings infra. {pull}16238[#16238]
-* Validate tribe node settings on startup {pull}16237[#16237]
-* Move node.client, node.data, node.master, node.local and node.mode to new settings infra {pull}16230[#16230]
-* Moved http settings to the new settings infrastructure {pull}16188[#16188]
-* Migrate network service to the new infra {pull}16187[#16187]
-* Convert client.transport settings to new infra {pull}16183[#16183]
-* Move discovery.* settings to new Setting infrastructure {pull}16182[#16182]
-* Change over to o.e.common.settings.Setting for http settings {pull}16181[#16181]
-* Convert "path.*" and "pidfile" to new settings infra {pull}16180[#16180]
-* Migrate repository settings to the new settings API {pull}16178[#16178]
-* Convert "indices.*" settings to new infra. {pull}16177[#16177]
-* Migrate gateway settings to the new settings API. {pull}16175[#16175]
-* Convert several node and test level settings {pull}16172[#16172]
-* Run Metadata upgrade tool on every version {pull}16168[#16168]
-* Check for invalid index settings on metadata upgrade {pull}16156[#16156]
-* Validate the settings key if it's simple chars separated by `.` {pull}16120[#16120]
-* Validate known global settings on startup {pull}16091[#16091]
-* Cut over all index scope settings to the new setting infrastructure {pull}16054[#16054] (issues: {issue}12790[#12790], {issue}12854[#12854], {issue}16032[#16032], {issue}6732[#6732])
-* Remove updatability of `index.flush_on_close` {pull}15964[#15964] (issue: {issue}15955[#15955])
-* Move all dynamic settings and their config classes to the index level {pull}15955[#15955] (issue: {issue}6732[#6732])
-* Always require units for bytes and time settings {pull}15948[#15948] (issue: {issue}11437[#11437])
-* Make MetaData parsing less lenient. {pull}15828[#15828]
-* Move async translog sync logic into IndexService {pull}15584[#15584]
-* Remove `index.merge.scheduler.notify_on_failure` and default to `true` {pull}15572[#15572] (issue: {issue}15570[#15570])
-* Remove cache concurrency level settings that no longer apply {pull}14210[#14210] (issues: {issue}13224[#13224], {issue}13717[#13717], {issue}7836[#7836])
-
-Similarities::
-* Defining a global default similarity {pull}16682[#16682] (issue: {issue}16594[#16594])
-
-Stats::
-* Normalize unavailable load average {pull}16061[#16061] (issues: {issue}12049[#12049], {issue}14741[#14741], {issue}15907[#15907], {issue}15932[#15932], {issue}15934[#15934])
-* Add load averages to OS stats on FreeBSD {pull}15934[#15934] (issue: {issue}15917[#15917])
-* Expose pending cluster state queue size in node stats {pull}14040[#14040] (issue: {issue}13610[#13610])
-
-Store::
-* Remove support for legacy checksums {pull}16931[#16931]
-* Rename index folder to index_uuid {pull}16442[#16442] (issues: {issue}13264[#13264], {issue}13265[#13265], {issue}14512[#14512], {issue}14932[#14932], {issue}15853[#15853])
-
-Task Manager::
-* Add ability to group tasks by common parent {pull}17341[#17341]
-* Add start time and duration to tasks {pull}16829[#16829]
-* Combine node name and task id into single string task id {pull}16744[#16744]
-* Add task status {pull}16356[#16356] (issue: {issue}16344[#16344])
-* Extend tracking of parent tasks to master node, replication and broadcast actions {pull}15931[#15931]
-
-Translog::
-* Remove ChannelReference and simplify Views {pull}15898[#15898]
-* Simplify TranslogWriter to always write to a stream {pull}15771[#15771]
-* Remove TranslogService and fold it into synchronous IndexShard API {pull}13707[#13707]
-
-
-
-[[bug-5.0.0-alpha1]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Fixes the defaults for `keyed` in the percentiles aggregations {pull}17217[#17217]
-* Correct typo in class name of StatsAggregator {pull}15264[#15264] (issue: {issue}14730[#14730])
-
-Allocation::
-* Replica shards must be failed before primary shards {pull}15686[#15686]
-
-CRUD::
-* Prevent TransportReplicationAction from routing requests based on a stale local routing table {pull}16274[#16274] (issues: {issue}12573[#12573], {issue}12574[#12574])
-* Resolves the conflict between alias routing and parent routing by applying the alias routing and ignoring the parent routing. {pull}15371[#15371] (issue: {issue}3068[#3068])
-
-Cluster::
-* Shard state action channel exceptions {pull}16057[#16057] (issue: {issue}15748[#15748])
-
-Core::
-* Handle RejectedExecution gracefully in TransportService during shutdown {pull}16965[#16965]
-
-Geo::
-* Fix multi-field support for GeoPoint types {pull}15702[#15702] (issue: {issue}15701[#15701])
-* Enforce distance in distance query is > 0 [ISSUE] {pull}15135[#15135]
-
-Ingest::
-* Handle regex parsing errors in Gsub and Grok Processors {pull}17260[#17260]
-* add on_failure exception metadata to ingest document for verbose simulate {pull}16562[#16562]
-* The IngestDocument copy constructor should make a deep copy {pull}16248[#16248] (issue: {issue}16246[#16246])
-
-Internal::
-* Enable unmap hack for java 9 {pull}16986[#16986] (issue: {issue}1[#1])
-* Fix issues with failed cache loads {pull}14315[#14315]
-* Allow parser to move on the START_OBJECT token when parsing search source {pull}14145[#14145]
-* Ensure searcher is released if wrapping fails {pull}14107[#14107]
-* Avoid deadlocks in Cache#computeIfAbsent {pull}14091[#14091] (issue: {issue}14090[#14090])
-
-Java API::
-* Fix potential NPE in SearchSourceBuilder {pull}16905[#16905] (issue: {issue}16902[#16902])
-
-Mapping::
-* Make dynamic template parsing less lenient. {pull}17249[#17249]
-* Fix dynamic mapper when its parent already has an update {pull}17065[#17065]
-* Fix copy_to when the target is a dynamic object field. {pull}15216[#15216] (issue: {issue}11237[#11237])
-* Preserve existing mappings on batch mapping updates {pull}15130[#15130] (issues: {issue}14899[#14899], {issue}15129[#15129])
-
-Packaging::
-* Set MAX_OPEN_FILES to 65536 {pull}17431[#17431] (issue: {issue}17430[#17430])
-* [windows] Service command still had positional start command {pull}17391[#17391]
-* Do not pass double-dash arguments on startup {pull}17087[#17087] (issue: {issue}17084[#17084])
-
-Percolator::
-* Let PercolatorQuery's explain use the two phase iterator {pull}17315[#17315] (issue: {issue}17314[#17314])
-
-Plugin Store SMB::
-* Fix calling ensureOpen() on the wrong directory (master forwardport) {pull}16395[#16395] (issue: {issue}16383[#16383])
-
-Plugins::
-* CliTool: Messages printed in Terminal should have percent char escaped {pull}16367[#16367]
-
-Query DSL::
-* Resolve string dates and date math to millis before evaluating for rewrite in range query {pull}17239[#17239]
-* `constant_score` query should throw error on more than one filter {pull}17135[#17135] (issue: {issue}17126[#17126])
-* Single IPv4 addresses in IP field term queries {pull}16068[#16068] (issue: {issue}16058[#16058])
-* Make strategy optional in GeoShapeQueryBuilder readFrom and writeTo {pull}13963[#13963]
-
-Query Refactoring::
-* Query refactoring: set has_parent & has_child types context properly {pull}13863[#13863]
-* Make sure equivalent geohashCellQueries are equal after toQuery called {pull}13792[#13792]
-
-Recovery::
-* Invoke `IndexingOperationListeners` also when recovering from store or remote {pull}17406[#17406]
-* Prevent interruption while store checks lucene files for consistency {pull}16308[#16308]
-* Mark shard as recovering on the cluster state thread {pull}14276[#14276] (issues: {issue}13766[#13766], {issue}14115[#14115])
-
-Search::
-* Fix for search after {pull}16271[#16271]
-* Do not be lenient when parsing CIDRs {pull}14874[#14874] (issue: {issue}14862[#14862])
-
-Settings::
-* Register bootstrap settings {pull}16513[#16513]
-* Add settings filtering to node info requests {pull}16445[#16445]
-* Ban write access to system properties {pull}14914[#14914]
-
-Task Manager::
-* Take filterNodeIds into consideration while sending task requests to nodes {pull}17081[#17081]
-
-Translog::
-* Move translog recover outside of the engine {pull}17422[#17422]
-* Mark shard active during recovery; push settings after engine finally inits {pull}16250[#16250] (issues: {issue}14121[#14121], {issue}16209[#16209])
-
-
-
-[[upgrade-5.0.0-alpha1]]
-[float]
-=== Upgrades
-
-Core::
-* Upgrade to lucene-6.0.0-f0aa4fc. {pull}17075[#17075]
-* upgrade to lucene 6.0.0-snapshot-bea235f {pull}16964[#16964]
-* Upgrade to Jackson 2.7.1 {pull}16801[#16801] (issue: {issue}16294[#16294])
-
-Ingest::
-* Update MaxMind geoip2 version to 2.6 {pull}16837[#16837] (issue: {issue}16801[#16801])
-
-Internal::
-* Bump master (3.0-snapshot) to java 8 {pull}13314[#13314]
-
-Search Templates::
-* Update mustache.java to version 0.9.1 {pull}14053[#14053] (issue: {issue}13224[#13224])
-
diff --git a/docs/reference/release-notes/5.0.0-alpha2.asciidoc b/docs/reference/release-notes/5.0.0-alpha2.asciidoc
deleted file mode 100644
index b088abf89a..0000000000
--- a/docs/reference/release-notes/5.0.0-alpha2.asciidoc
+++ /dev/null
@@ -1,268 +0,0 @@
-[[release-notes-5.0.0-alpha2]]
-== 5.0.0-alpha2 Release Notes
-
-Also see <<breaking-changes-5.0>>.
-
-IMPORTANT: This is an alpha release and is intended for _testing purposes only_. Indices created in this version will *not be compatible with Elasticsearch 5.0.0 GA*. Upgrading 5.0.0-alpha2 to any other version is not supported.
-
-[[breaking-5.0.0-alpha2]]
-[float]
-=== Breaking changes
-
-Analysis::
-* Analyze API: Rename filters/token_filters/char_filter in the Analyze API {pull}17843[#17843] (issue: {issue}15189[#15189])
-
-Cluster::
-* Remove validation errors from cluster health response {pull}17773[#17773] (issue: {issue}16979[#16979])
-
-Indexed Scripts/Templates::
-* Store indexed scripts in the cluster state instead of the `.scripts` index {pull}17650[#17650] (issue: {issue}16651[#16651])
-
-Packaging::
-* Add JVM options configuration file {pull}17675[#17675] (issue: {issue}17121[#17121])
-
-Percolator::
-* Remove `.percolator` type in favour of `percolator` field type {pull}17560[#17560]
-
-REST::
-* Remove camelCase support {pull}17933[#17933] (issue: {issue}8988[#8988])
-* Remove 'case' parameter from rest apis {pull}17774[#17774] (issue: {issue}8988[#8988])
-* Disallow unquoted field names {pull}15351[#15351] (issue: {issue}9800[#9800])
-
-Settings::
-* Remove `action.get.realtime` setting {pull}17857[#17857] (issue: {issue}12543[#12543])
-
-
-
-[[breaking-java-5.0.0-alpha2]]
-[float]
-=== Breaking Java changes
-
-Settings::
-* Remove Settings.settingsBuilder. {pull}17619[#17619]
-
-
-
-[[deprecation-5.0.0-alpha2]]
-[float]
-=== Deprecations
-
-Query DSL::
-* Deprecate Indices query {pull}17710[#17710] (issue: {issue}12017[#12017])
-* Deprecate mlt, in and geo_bbox query name shortcuts {pull}17507[#17507]
-
-Query Refactoring::
-* Splits `phrase` and `phrase_prefix` in match query into `MatchPhraseQueryBuilder` and `MatchPhrasePrefixQueryBuilder` {pull}17508[#17508]
-
-
-
-[[feature-5.0.0-alpha2]]
-[float]
-=== New features
-
-Analysis::
-* Add `fingerprint` token filter and `fingerprint` analyzer {pull}17873[#17873] (issue: {issue}13325[#13325])
-
-Plugin Analysis ICU::
-* Adding support for customizing the rule file in ICU tokenizer {pull}13651[#13651] (issue: {issue}13146[#13146])
-
-
-
-[[enhancement-5.0.0-alpha2]]
-[float]
-=== Enhancements
-
-CAT API::
-* Add _cat/tasks {pull}17551[#17551]
-* Cat health supports ts=0 option {pull}13508[#13508] (issue: {issue}10109[#10109])
-
-Cache::
-* Allow the query cache to be disabled. {pull}16268[#16268] (issue: {issue}15802[#15802])
-
-Circuit Breakers::
-* Limit request size {pull}17133[#17133] (issue: {issue}16011[#16011])
-
-Cluster::
-* Adds tombstones to cluster state for index deletions {pull}17265[#17265] (issues: {issue}16358[#16358], {issue}17435[#17435])
-* Enable acked indexing {pull}17038[#17038] (issue: {issue}7572[#7572])
-
-Core::
-* Kill thread local leak {pull}17921[#17921] (issues: {issue}283[#283], {issue}630[#630])
-* Add heap size bootstrap check {pull}17728[#17728] (issue: {issue}17490[#17490])
-* Remove hostname from NetworkAddress.format {pull}17601[#17601] (issue: {issue}17604[#17604])
-* Bootstrapping bootstrap checks {pull}17595[#17595] (issues: {issue}17474[#17474], {issue}17570[#17570])
-* Add max map count check {pull}16944[#16944]
-
-Geo::
-* Enhanced lat/long error handling {pull}16833[#16833] (issue: {issue}16137[#16137])
-
-Index APIs::
-* Fail hot_threads in a better way if unsupported by JDK {pull}15909[#15909]
-
-Ingest::
-* Streamline option naming for several processors {pull}17892[#17892] (issue: {issue}17835[#17835])
-
-Internal::
-* Makes Script type writeable {pull}17908[#17908] (issue: {issue}17753[#17753])
-* FiltersAggregatorBuilder: Don't create new context for inner parsing {pull}17851[#17851]
-* Clean up serialization on some stats {pull}17832[#17832] (issue: {issue}17085[#17085])
-* Normalize registration for SignificanceHeuristics {pull}17830[#17830] (issue: {issue}17085[#17085])
-* Make (read|write)NamedWriteable public {pull}17829[#17829] (issue: {issue}17682[#17682])
-* Use try-with-resource when creating new parser instances where possible {pull}17822[#17822]
-* Don't pass XContentParser to ParseFieldRegistry#lookup {pull}17794[#17794]
-* Internal: Remove threadlocal from document parser {pull}17764[#17764]
-* Cut range aggregations to registerAggregation {pull}17757[#17757] (issue: {issue}17085[#17085])
-* Remove ParseFieldMatcher from AbstractXContentParser {pull}17756[#17756] (issue: {issue}17417[#17417])
-* Remove parser argument from methods where we already pass in a parse context {pull}17738[#17738]
-* Switch SearchAfterBuilder to writeGenericValue {pull}17735[#17735] (issue: {issue}17085[#17085])
-* Remove StreamableReader {pull}17729[#17729] (issue: {issue}17085[#17085])
-* Cleanup nested, has_child & has_parent query builders for inner hits construction {pull}17719[#17719] (issue: {issue}11118[#11118])
-* Make AllocationCommands NamedWriteables {pull}17661[#17661]
-* Isolate StreamableReader {pull}17656[#17656] (issue: {issue}17085[#17085])
-* Create registration methods for aggregations similar to those for queries {pull}17653[#17653] (issues: {issue}17085[#17085], {issue}17389[#17389])
-* Turn RestChannel into an interface {pull}17643[#17643] (issue: {issue}17133[#17133])
-* Remove PROTOTYPEs from QueryBuilders {pull}17632[#17632] (issue: {issue}17085[#17085])
-* Remove registerQueryParser {pull}17608[#17608]
-* ParseField#getAllNamesIncludedDeprecated to not return duplicate names {pull}17504[#17504]
-* Rework a query parser and improve registration {pull}17458[#17458]
-* Clean up QueryParseContext and don't hold it inside QueryRewrite/ShardContext {pull}17417[#17417]
-
-Mapping::
-* Automatically upgrade analyzed strings with an analyzer to `text`. {pull}17861[#17861]
-* Support dots in field names when mapping already exists {pull}17759[#17759] (issue: {issue}15951[#15951])
-* Use the new points API to index numeric fields. {pull}17746[#17746] (issues: {issue}11513[#11513], {issue}16751[#16751], {issue}17007[#17007], {issue}17700[#17700])
-* Simplify AllEntries, AllField and AllFieldMapper {pull}17613[#17613]
-
-Network::
-* Limit request size {pull}17133[#17133] (issue: {issue}16011[#16011])
-
-Packaging::
-* Remove unnecessary sleep from init script restart {pull}17966[#17966]
-* Explicitly set packaging permissions {pull}17912[#17912] (issue: {issue}17634[#17634])
-* Allow configuring Windows service name, description and user {pull}17312[#17312]
-* rpm uses non-portable `--system` flag to `useradd` {pull}14596[#14596] (issue: {issue}14211[#14211])
-
-Percolator::
-* PercolatorQueryBuilder cleanup by using MemoryIndex#fromDocument(...) helper {pull}17669[#17669] (issue: {issue}9386[#9386])
-
-Plugins::
-* Cli: Improve output for usage errors {pull}17938[#17938]
-* Cli: Add verbose output with zip url when installing plugin {pull}17662[#17662] (issue: {issue}17529[#17529])
-
-Query DSL::
-* Add MatchNoDocsQuery, a query that matches no documents and prints the reason why in the toString method. {pull}17780[#17780]
-* Adds `ignore_unmapped` option to geo queries {pull}17751[#17751]
-* Adds `ignore_unmapped` option to nested and P/C queries {pull}17748[#17748]
-* SimpleQueryParser should call MappedFieldType.termQuery when appropriate. {pull}17678[#17678]
-
-REST::
-* Allow JSON with unquoted field names by enabling system property {pull}17801[#17801] (issue: {issue}17674[#17674])
-
-Recovery::
-* TransportNodesListGatewayStartedShards should fall back to disk based index metadata if not found in cluster state {pull}17663[#17663] (issue: {issue}17630[#17630])
-
-Reindex API::
-* Properly mark reindex's child tasks as child tasks {pull}17770[#17770]
-
-Search::
-* Fail query if it contains very large rescores {pull}17917[#17917] (issue: {issue}17522[#17522])
-
-Settings::
-* Switch to registered Settings for all IndexingMemoryController settings {pull}17778[#17778] (issue: {issue}17442[#17442])
-
-Stats::
-* Add points to SegmentStats. {pull}17775[#17775] (issue: {issue}16974[#16974])
-* Remove FieldStats.Float. {pull}17749[#17749]
-* Show configured and remaining delay for an unassigned shard. {pull}17515[#17515] (issue: {issue}17372[#17372])
-
-Store::
-* Use `mmapfs` by default. {pull}17616[#17616] (issue: {issue}16983[#16983])
-
-Suggesters::
-* Add bwc support for reading pre-5.0 completion index {pull}17602[#17602]
-
-Task Manager::
-* Move parentTaskId into TransportRequest {pull}17872[#17872]
-* Shorten the serialization of the empty TaskId {pull}17870[#17870]
-* Expose whether a task is cancellable in the _tasks list API {pull}17464[#17464] (issue: {issue}17369[#17369])
-
-
-
-[[bug-5.0.0-alpha2]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Adds serialisation of sigma to extended_stats_bucket pipeline aggregation {pull}17703[#17703] (issue: {issue}17701[#17701])
-* Fixes NPE when no window is specified in moving average request {pull}17556[#17556] (issue: {issue}17516[#17516])
-* Fixes Filter and FiltersAggregation to work with empty query {pull}17542[#17542] (issue: {issue}17518[#17518])
-* ExtendedStatsAggregator should also pass sigma to empty aggs. {pull}17388[#17388] (issue: {issue}17362[#17362])
-
-Allocation::
-* Rebalancing policy shouldn't prevent hard allocation decisions {pull}17698[#17698] (issues: {issue}14057[#14057], {issue}14259[#14259])
-* When considering the size of shadow replica shards, set size to 0 {pull}17509[#17509] (issue: {issue}17460[#17460])
-
-Core::
-* Refactor UUID-generating methods out of Strings {pull}17837[#17837] (issue: {issue}17819[#17819])
-* Node names cleanup {pull}17723[#17723] (issue: {issue}17718[#17718])
-* NullPointerException from IndexingMemoryController when a version conflict happens during recovery {pull}17569[#17569]
-
-Ingest::
-* Ingest does not close its factories {pull}17626[#17626] (issue: {issue}17625[#17625])
-
-Internal::
-* Fix BulkItemResponse.Failure.toString {pull}17871[#17871]
-
-Logging::
-* Add missing index name to search slow log. {pull}17818[#17818] (issue: {issue}17025[#17025])
-
-Mapping::
-* Fix cross type mapping updates for `boolean` fields. {pull}17882[#17882] (issue: {issue}17879[#17879])
-* Fix dynamic check to properly handle parents {pull}17864[#17864] (issues: {issue}17644[#17644], {issue}17854[#17854])
-* Fix array parsing to remove its context when finished parsing {pull}17768[#17768]
-* Disallow fielddata loading on text fields that are not indexed. {pull}17747[#17747]
-* Fail if an object is added after a field with the same name. {pull}17568[#17568] (issue: {issue}17567[#17567])
-
-Packaging::
-* Fix exit code {pull}17082[#17082]
-
-Plugin Discovery EC2::
-* Fix EC2 Discovery settings {pull}17651[#17651] (issue: {issue}16602[#16602])
-
-Plugins::
-* Quote path to java binary {pull}17496[#17496] (issue: {issue}17495[#17495])
-
-Query DSL::
-* Apply the default operator on analyzed wildcard in simple_query_string builder {pull}17776[#17776]
-* Apply the default operator on analyzed wildcard in query_string builder {pull}17711[#17711] (issue: {issue}2183[#2183])
-
-REST::
-* Fixes reading of CORS pre-flight headers and methods {pull}17523[#17523] (issue: {issue}17483[#17483])
-* index is a required url part for update by query {pull}17503[#17503]
-
-Reindex API::
-* Reindex should never report negative throttled_until {pull}17799[#17799] (issue: {issue}17783[#17783])
-* Reindex should gracefully handle when _source is disabled {pull}17667[#17667] (issue: {issue}17666[#17666])
-
-Settings::
-* convert settings for ResourceWatcherService to new infrastructure {pull}17948[#17948]
-
-Snapshot/Restore::
-* Fix the semantics for the BlobContainer interface {pull}17878[#17878] (issues: {issue}15579[#15579], {issue}15580[#15580])
-* On restore, selecting concrete indices can select wrong index {pull}17715[#17715]
-
-Task Manager::
-* Shard level tasks in Bulk Action lose reference to their parent tasks {pull}17743[#17743]
-
-Term Vectors::
-* Fix calculation of took time of term vectors request {pull}17817[#17817] (issue: {issue}12565[#12565])
-
-
-
-[[upgrade-5.0.0-alpha2]]
-[float]
-=== Upgrades
-
-Core::
-* Upgrade to lucene 6 release {pull}17657[#17657]
-
diff --git a/docs/reference/release-notes/5.0.0-alpha3.asciidoc b/docs/reference/release-notes/5.0.0-alpha3.asciidoc
deleted file mode 100644
index 411f34e46b..0000000000
--- a/docs/reference/release-notes/5.0.0-alpha3.asciidoc
+++ /dev/null
@@ -1,339 +0,0 @@
-[[release-notes-5.0.0-alpha3]]
-== 5.0.0-alpha3 Release Notes
-
-Also see <<breaking-changes-5.0>>.
-
-IMPORTANT: This is an alpha release and is intended for _testing purposes only_. Indices created in this version will *not be compatible with Elasticsearch 5.0.0 GA*. Upgrading 5.0.0-alpha3 to any other version is not supported.
-
-[[breaking-5.0.0-alpha3]]
-[float]
-=== Breaking changes
-
-CAT API::
-* Row-centric output for _cat/fielddata {pull}18068[#18068] (issue: {issue}10249[#10249])
-
-Inner Hits::
-* Drop top level inner hits in favour of inner hits defined in the query dsl {pull}17816[#17816] (issue: {issue}11118[#11118])
-
-Internal::
-* Actually bound the generic thread pool {pull}17017[#17017]
-
-Packaging::
-* Require /bin/bash in packaging {pull}18259[#18259] (issue: {issue}18251[#18251])
-* Remove plugin script parsing of system properties {pull}18207[#18207] (issue: {issue}18140[#18140])
-
-Plugin Delete By Query::
-* Remove Delete-By-Query plugin {pull}18516[#18516] (issue: {issue}18469[#18469])
-
-Query DSL::
-* Lessen leniency of the query dsl. {pull}18276[#18276]
-
-Scripting::
-* Allow only a single extension for a scripting engine {pull}18332[#18332] (issue: {issue}10598[#10598])
-* Remove 'sandbox' option for script settings, allow only registering a single language. {pull}18226[#18226] (issues: {issue}10598[#10598], {issue}17114[#17114])
-
-Settings::
-* Register `indices.query.bool.max_clause_count` setting {pull}18341[#18341] (issue: {issue}18336[#18336])
-* Remove settings and system properties entanglement {pull}18198[#18198] (issue: {issue}18197[#18197])
-
-
-
-[[breaking-java-5.0.0-alpha3]]
-[float]
-=== Breaking Java changes
-
-Allocation::
-* Move parsing of allocation commands into REST and remove support for plugins to register allocation commands {pull}17802[#17802] (issue: {issue}17894[#17894])
-
-Discovery::
-* Introduce node handshake {pull}15971[#15971] (issue: {issue}9061[#9061])
-
-Percolator::
-* Move the percolator from core to its own module {pull}18511[#18511]
-* Remove percolator cache {pull}18434[#18434]
-
-Scripting::
-* Remove LeafSearchScript.runAsFloat(): Nothing calls it. {pull}18364[#18364]
-
-Search::
-* Refactor of query profile classes to make way for other profile implementations {pull}18370[#18370] (issue: {issue}10538[#10538])
-
-
-
-[[feature-5.0.0-alpha3]]
-[float]
-=== New features
-
-Ingest::
-* Add a Sort ingest processor {pull}17999[#17999]
-* Add date_index_name processor {pull}17973[#17973] (issue: {issue}17814[#17814])
-
-Reindex API::
-* Port Delete By Query to Reindex infrastructure {pull}18329[#18329] (issue: {issue}16883[#16883])
-
-Snapshot/Restore::
-* Add Google Cloud Storage repository plugin {pull}13578[#13578] (issue: {issue}12880[#12880])
-
-Stats::
-* Extend field stats to report searchable/aggregatable fields {pull}17980[#17980] (issue: {issue}17750[#17750])
-
-
-
-[[enhancement-5.0.0-alpha3]]
-[float]
-=== Enhancements
-
-Aggregations::
-* Rename AggregatorBuilder to AggregationBuilder {pull}18377[#18377] (issue: {issue}18367[#18367])
-* Add the ability to use the breadth_first mode with nested aggregations (such as `top_hits`) which require access to score information. {pull}18127[#18127] (issue: {issue}9825[#9825])
-* Make significant terms work on fields that are indexed with points. {pull}18031[#18031]
-
-Allocation::
-* Limit retries of failed allocations per index {pull}18467[#18467] (issue: {issue}18417[#18417])
-* Immutable ShardRouting {pull}17821[#17821]
-* Add the shard's store status to the explain API {pull}17689[#17689] (issue: {issue}17372[#17372])
-
-Analysis::
-* Core: better error message when analyzer created without tokenizer or… {pull}18455[#18455] (issue: {issue}15492[#15492])
-* Move AsciiFolding earlier in FingerprintAnalyzer filter chain {pull}18281[#18281] (issue: {issue}18266[#18266])
-
-CAT API::
-* Add node name to Cat Recovery {pull}18187[#18187] (issue: {issue}8041[#8041])
-* Add support for documented byte/size units and for micros as a time unit in _cat API {pull}17779[#17779]
-
-Core::
-* Log OS and JVM on startup {pull}18557[#18557]
-* Add GC overhead logging {pull}18419[#18419]
-* Refactor JvmGcMonitorService for testing {pull}18378[#18378]
-* Default to server VM and add client VM check {pull}18155[#18155]
-* Add system bootstrap checks escape hatch {pull}18088[#18088]
-* Avoid sliced lock contention in internal engine {pull}18060[#18060] (issue: {issue}18053[#18053])
-
-Dates::
-* Support full range of Java Long for epoch DateTime {pull}18509[#18509] (issue: {issue}17936[#17936])
-
-Discovery::
-* Log warning if minimum_master_nodes set to less than quorum {pull}15625[#15625]
-
-Exceptions::
-* Make the index-too-old exception more explicit {pull}18438[#18438]
-* Add index name in IndexAlreadyExistsException default message {pull}18274[#18274]
-
-Expressions::
-* Support geo_point fields in lucene expressions {pull}18096[#18096]
-* Add support for .empty to expressions, and some docs improvements {pull}18077[#18077]
-
-Ingest::
-* Expose underlying processor to blame for thrown exception within CompoundProcessor {pull}18342[#18342] (issue: {issue}17823[#17823])
-* Avoid string concatenation in IngestDocument.FieldPath {pull}18108[#18108]
-* add ability to specify multiple grok patterns {pull}18074[#18074] (issue: {issue}17903[#17903])
-* add ability to disable overriding of existing field values in the set processor {pull}17902[#17902] (issue: {issue}17659[#17659])
-
-Inner Hits::
-* Change scriptFields member in InnerHitBuilder to a set {pull}18092[#18092] (issue: {issue}5831[#5831])
-
-Internal::
-* Do not automatically close XContent objects/arrays {pull}18549[#18549] (issue: {issue}18433[#18433])
-* Remove use of a Fields class in snapshot responses {pull}18497[#18497]
-* Removes multiple toXContent entry points for SnapshotInfo {pull}18494[#18494]
-* Removes unused methods in the o/e/common/Strings class {pull}18346[#18346]
-* Determine content length eagerly in HttpServer {pull}18203[#18203]
-* Consolidate query generation in QueryShardContext {pull}18129[#18129]
-* Make reset in QueryShardContext private {pull}18113[#18113]
-* Remove Strings#splitStringToArray {pull}18110[#18110]
-* Add toString() to GetResponse {pull}18102[#18102]
-* ConstructingObjectParser adapts ObjectParser for ctor args {pull}17596[#17596] (issue: {issue}17352[#17352])
-
-Java API::
-* Improve adding clauses to `span_near` and `span_or` query {pull}18485[#18485] (issue: {issue}18478[#18478])
-* QueryBuilder does not need generics. {pull}18133[#18133]
-
-Mapping::
-* Adds methods to find (and dynamically create) the mappers for the parents of a field with dots in the field name {pull}18106[#18106] (issue: {issue}15951[#15951])
-
-Network::
-* Netty request/response tracer should wait for send {pull}18500[#18500]
-* Exclude specific transport actions from request size limit check {pull}17951[#17951]
-
-Packaging::
-* Don't mkdir directly in deb init script {pull}18503[#18503] (issue: {issue}18307[#18307])
-* Increase default heap size to 2g {pull}18311[#18311] (issues: {issue}16334[#16334], {issue}17686[#17686], {issue}18309[#18309])
-* Switch init.d scripts to use bash {pull}18308[#18308] (issue: {issue}18259[#18259])
-* Switch scripts to use bash {pull}18251[#18251] (issue: {issue}14002[#14002])
-* Further simplifications of plugin script {pull}18239[#18239] (issue: {issue}18207[#18207])
-* Pass ES_JAVA_OPTS to JVM for plugins script {pull}18140[#18140] (issue: {issue}16790[#16790])
-
-Parent/Child::
-* Allow adding additional child types that point to an existing parent type {pull}18446[#18446] (issue: {issue}17956[#17956])
-
-Plugin Lang Painless::
-* improve painless whitelist coverage of java api {pull}18533[#18533]
-* Definition cleanup {pull}18463[#18463]
-* Made def variable casting consistent with invokedynamic rules {pull}18425[#18425]
-* Use Java 9 Indy String Concats, if available {pull}18400[#18400] (issue: {issue}18398[#18398])
-* Add method overloading based on arity {pull}18385[#18385]
-* Refactor WriterUtils to extend ASM GeneratorAdapter {pull}18382[#18382]
-* Whitelist expansion {pull}18372[#18372]
-* Remove boxing when loading and storing values in "def" fields/arrays, remove boxing on simple method calls of "def" methods {pull}18359[#18359]
-* Some cleanups {pull}18352[#18352]
-* Use isAssignableFrom instead of relying on ClassCastException {pull}18350[#18350]
-* Build descriptor of array and field load/store in code {pull}18338[#18338]
-* Rename the dynamic call site factory to DefBootstrap {pull}18335[#18335]
-* Cleanup of DynamicCallSite {pull}18323[#18323]
-* Improve exception stacktraces {pull}18319[#18319]
-* Make Line Number Available in Painless {pull}18298[#18298]
-* Remove input, support params instead {pull}18287[#18287]
-* Decouple ANTLR AST from Painless {pull}18286[#18286]
-* _value support in painless {pull}18284[#18284]
-* Long priority over Float {pull}18282[#18282]
-* _score as double, not float {pull}18277[#18277]
-* Add 'ctx' keyword to painless. {pull}18264[#18264]
-* Painless doc access {pull}18262[#18262]
-* Retrieve _score directly from Scorer {pull}18258[#18258]
-* Implement needsScore() correctly. {pull}18247[#18247]
-* Add synthetic length property as alias to Lists, so they can be used like arrays {pull}18241[#18241]
-* Use better typing for dynamic method calls {pull}18234[#18234]
-* Array load/store and length with invokedynamic {pull}18232[#18232] (issue: {issue}18201[#18201])
-* Switch painless dynamic calls to invokedynamic, remove perf hack/cheat {pull}18201[#18201]
-* Add fielddata accessors (.value/.values/.distance()/etc) {pull}18169[#18169]
-* painless: optimize/simplify dynamic field and method access {pull}18151[#18151]
-* Painless: Single-Quoted Strings {pull}18150[#18150]
-
-Plugins::
-* Add plugin information for Verbose mode {pull}18051[#18051] (issue: {issue}16375[#16375])
-
-Query DSL::
-* Enforce MatchQueryBuilder#maxExpansions() to be strictly positive {pull}18464[#18464]
-* Don't allow `fuzziness` for `multi_match` types `cross_fields`, `phrase` and `phrase_prefix` {pull}18322[#18322] (issues: {issue}6866[#6866], {issue}7764[#7764])
-
-REST::
-* CORS handling triggered whether User-Agent is a browser or not {pull}18283[#18283]
-* Add semicolon query string parameter delimiter {pull}18186[#18186] (issue: {issue}18175[#18175])
-* Enable HTTP compression by default with compression level 3 {pull}18066[#18066] (issue: {issue}7309[#7309])
-
-Reindex API::
-* Make Reindex cancellation tests more uniform {pull}18498[#18498]
-* Make DeleteByQueryRequest implement IndicesRequest {pull}18466[#18466]
-* Switch default batch size for reindex to 1000 {pull}18340[#18340]
-* Teach reindex to retry on search failures {pull}18331[#18331] (issue: {issue}18059[#18059])
-* Remove ReindexResponse in favor of BulkIndexByScrollResponse {pull}18205[#18205]
-* Stricter validation of Reindex's requests_per_second {pull}18028[#18028]
-
-Search::
-* Introduces GeoValidationMethod to GeoDistanceSortBuilder {pull}18036[#18036]
-* Switches from empty boolean query to matchNoDocs {pull}18007[#18007] (issue: {issue}17981[#17981])
-* Allow binary sort values. {pull}17959[#17959] (issue: {issue}6077[#6077])
-
-Search Refactoring::
-* Removes the now obsolete SearchParseElement implementations {pull}18233[#18233]
-
-Snapshot/Restore::
-* Change BlobPath.buildAsString() method {pull}18461[#18461]
-* Remove the Snapshot class in favor of using SnapshotInfo {pull}18167[#18167] (issue: {issue}18156[#18156])
-
-Stats::
-* Do not return fieldstats information for fields that exist in the mapping but not in the index. {pull}18212[#18212] (issue: {issue}17980[#17980])
-* Add whether the shard state fetch is pending to the allocation explain API {pull}18119[#18119] (issue: {issue}17372[#17372])
-* Add Failure Details to every NodesResponse {pull}17964[#17964] (issue: {issue}3740[#3740])
-* Add I/O statistics on Linux {pull}15915[#15915] (issue: {issue}15296[#15296])
-
-Translog::
-* FSync translog outside of the writer's global lock {pull}18360[#18360]
-
-
-
-[[bug-5.0.0-alpha3]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Fix TimeUnitRounding for hour, minute and second units {pull}18415[#18415] (issue: {issue}18326[#18326])
-* Aggregations fix: support include/exclude strings for IP and dates {pull}18408[#18408] (issue: {issue}17705[#17705])
-* Fix xcontent rendering of ip terms aggs. {pull}18003[#18003] (issue: {issue}17971[#17971])
-* Improving parsing of sigma param for Extended Stats Bucket Aggregation {pull}17562[#17562] (issue: {issue}17499[#17499])
-
-Analysis::
-* Add `Character.MODIFIER_SYMBOL` to the list of symbol categories. {pull}18402[#18402] (issue: {issue}18388[#18388])
-
-Bulk::
-* Add not-null precondition check in BulkRequest {pull}18347[#18347] (issue: {issue}12038[#12038])
-
-CAT API::
-* Expand wildcards to closed indices in /_cat/indices {pull}18545[#18545] (issues: {issue}16419[#16419], {issue}17395[#17395])
-
-Circuit Breakers::
-* Free bytes reserved on request breaker {pull}18204[#18204] (issue: {issue}18144[#18144])
-
-Cluster::
-* Dangling indices are not imported if a tombstone for the index exists {pull}18250[#18250] (issue: {issue}18249[#18249])
-* Fix issue with tombstones matching active indices in cluster state {pull}18058[#18058] (issue: {issue}18054[#18054])
-
-Core::
-* Fix concurrency bug in IMC that could cause it to check too infrequently {pull}18357[#18357]
-* Iterables.flatten should not pre-cache the first iterator {pull}18355[#18355] (issue: {issue}18353[#18353])
-* Avoid race while retiring executors {pull}18333[#18333]
-* Don't try to compute completion stats on a reader after we already closed it {pull}18094[#18094]
-
-Highlighting::
-* Skip all geo point queries in plain highlighter {pull}18495[#18495] (issue: {issue}17537[#17537])
-* Exclude all but string fields from highlighting if wildcards are used… {pull}18183[#18183] (issue: {issue}17537[#17537])
-
-Ingest::
-* Pipeline Stats: Fix concurrent modification exception {pull}18177[#18177] (issue: {issue}18126[#18126])
-* Validate properties values according to database type {pull}17940[#17940] (issue: {issue}17683[#17683])
-
-Internal::
-* Add XPointValues {pull}18011[#18011] (issue: {issue}18010[#18010])
-
-Mapping::
-* Make doc_values accessible for _type {pull}18220[#18220]
-* Fix and test handling of `null_value`. {pull}18090[#18090] (issue: {issue}18085[#18085])
-* Fail automatic string upgrade if the value of `index` is not recognized. {pull}18082[#18082] (issue: {issue}18062[#18062])
-
-Packaging::
-* Filter client/server VM options from jvm.options {pull}18473[#18473]
-* Preserve config files from RPM install {pull}18188[#18188] (issue: {issue}18158[#18158])
-* Fix typo in message for variable setup ES_MAX_MEM {pull}18168[#18168]
-* Don't run `mkdir` when $DATA_DIR contains a comma-separated list {pull}17419[#17419] (issue: {issue}16992[#16992])
-
-Percolator::
-* Add support for MatchNoDocsQuery in percolator's query terms extract service {pull}18492[#18492]
-
-Plugin Discovery EC2::
-* Add TAG_SETTING to list of allowed tags for the ec2 discovery plugin. {pull}18257[#18257]
-
-Plugin Lang Painless::
-* Remove Grammar Ambiguities {pull}18531[#18531]
-* Remove if/else ANTLR ambiguity. {pull}18428[#18428]
-* Fix insanely slow compilation {pull}18410[#18410] (issue: {issue}18398[#18398])
-* Fix Bug in Painless Assignment {pull}18379[#18379]
-* Fix bracket shortcuts {pull}18263[#18263]
-
-Plugin Repository Azure::
-* Fix azure files removal {pull}18451[#18451] (issues: {issue}16472[#16472], {issue}18436[#18436])
-
-REST::
-* Do not decode path when sending error {pull}18477[#18477] (issue: {issue}18476[#18476])
-* CORS should permit same origin requests {pull}18278[#18278] (issue: {issue}18256[#18256])
-
-Search::
-* Fix _only_nodes preferences {pull}18483[#18483] (issues: {issue}12546[#12546], {issue}12700[#12700])
-* Speed up named queries. {pull}18470[#18470]
-* Fix parsing single `rescore` element in SearchSourceBuilder {pull}18440[#18440] (issue: {issue}18439[#18439])
-* Fail queries on not indexed fields. {pull}18014[#18014]
-
-Settings::
-* Use object equality to compare versions in IndexSettings {pull}18103[#18103]
-* fix exists method for list settings when using numbered setting format {pull}17949[#17949]
-
-Snapshot/Restore::
-* Fix race condition in snapshot initialization {pull}18426[#18426] (issue: {issue}18121[#18121])
-* Handle indices=["_all"] when restoring a snapshot {pull}18025[#18025]
-
-Stats::
-* Add missing builder.endObject() in FsInfo {pull}18443[#18443] (issues: {issue}15915[#15915], {issue}18433[#18433])
-
-Translog::
-* Snapshotting and sync could cause a deadlock in TranslogWriter {pull}18481[#18481] (issues: {issue}1[#1], {issue}18360[#18360], {issue}2[#2])
-
-
diff --git a/docs/reference/release-notes/5.0.0-alpha4.asciidoc b/docs/reference/release-notes/5.0.0-alpha4.asciidoc
deleted file mode 100644
index fd53c7bb17..0000000000
--- a/docs/reference/release-notes/5.0.0-alpha4.asciidoc
+++ /dev/null
@@ -1,361 +0,0 @@
-[[release-notes-5.0.0-alpha4]]
-== 5.0.0-alpha4 Release Notes
-
-Also see <<breaking-changes-5.0>>.
-
-IMPORTANT: This is an alpha release and is intended for _testing purposes only_. Indices created in this version will *not be compatible with Elasticsearch 5.0.0 GA*. Upgrading 5.0.0-alpha4 to any other version is not supported.
-
-[[breaking-5.0.0-alpha4]]
-[float]
-=== Breaking changes
-
-Aggregations::
-* Remove size 0 options in aggregations {pull}18854[#18854] (issue: {issue}18838[#18838])
-
-CRUD::
-* Wait for changes to be visible by search {pull}17986[#17986] (issue: {issue}1063[#1063])
-
-Core::
-* Remove cluster name from data path {pull}18554[#18554] (issue: {issue}17810[#17810])
-
-Inner Hits::
-* Also do not serialize `_index` key in search response for parent/child inner hits {pull}19011[#19011]
-* Don't include `_id`, `_type` and `_index` keys in search response for inner hits {pull}18995[#18995] (issue: {issue}18091[#18091])
-* Nested inner hits shouldn't use relative paths {pull}18567[#18567] (issue: {issue}16653[#16653])
-
-Mapping::
-* Remove `_timestamp` and `_ttl` on 5.x indices. {pull}18980[#18980] (issue: {issue}18280[#18280])
-
-Packaging::
-* Remove allow running as root {pull}18694[#18694] (issue: {issue}18688[#18688])
-
-Search::
-* Remove only node preference {pull}18875[#18875] (issue: {issue}18822[#18822])
-* Add search preference to prefer multiple nodes {pull}18872[#18872] (issue: {issue}18822[#18822])
-
-Settings::
-* Rename bootstrap.mlockall to bootstrap.memory_lock {pull}18669[#18669]
-
-Snapshot/Restore::
-* Change the default of `include_global_state` from true to false for snapshot restores {pull}18773[#18773] (issue: {issue}18569[#18569])
-
-
-
-[[breaking-java-5.0.0-alpha4]]
-[float]
-=== Breaking Java changes
-
-Core::
-* Register thread pool settings {pull}18674[#18674] (issues: {issue}18613[#18613], {issue}9216[#9216])
-
-Highlighting::
-* Register Highlighter instances instead of classes {pull}18859[#18859]
-
-Internal::
-* Cleanup ClusterService dependencies and detached from Guice {pull}18941[#18941]
-* Simplify SubFetchPhase interface {pull}18881[#18881]
-* Simplify FetchSubPhase registration and detach it from Guice {pull}18862[#18862]
-
-Java API::
-* Remove setRefresh {pull}18752[#18752] (issue: {issue}1063[#1063])
-
-Plugins::
-* Fail to start if plugin tries broken onModule {pull}19025[#19025]
-* Simplify ScriptModule and script registration {pull}18903[#18903]
-* Cut over settings registration to a pull model {pull}18890[#18890]
-* Plugins cleanup {pull}18594[#18594] (issue: {issue}18588[#18588])
-
-Scripting::
-* Move search template to lang-mustache module {pull}18765[#18765] (issue: {issue}17906[#17906])
-
-
-
-[[feature-5.0.0-alpha4]]
-[float]
-=== New features
-
-Aggregations::
-* Adds aggregation profiling to the profile API {pull}18414[#18414] (issue: {issue}10538[#10538])
-* New Matrix Stats Aggregation module {pull}18300[#18300] (issue: {issue}16826[#16826])
-
-Index APIs::
-* Add rollover API to switch index aliases given some predicates {pull}18732[#18732] (issue: {issue}18647[#18647])
-
-Java REST Client::
-* Low level Rest Client {pull}18735[#18735] (issue: {issue}7743[#7743])
-
-Mapping::
-* Expose half-floats. {pull}18887[#18887]
-
-Scroll::
-* Add the ability to partition a scroll in multiple slices. {pull}18237[#18237] (issue: {issue}13494[#13494])
-
-Store::
-* Expose MMapDirectory.preLoad(). {pull}18880[#18880]
-* Add primitive to shrink an index into a single shard {pull}18270[#18270]
-
-
-
-[[enhancement-5.0.0-alpha4]]
-[float]
-=== Enhancements
-
-Aggregations::
-* Automatically set the collection mode to breadth_first in the terms aggregation when the cardinality of the field is unknown or smaller than the requested size. {pull}18779[#18779] (issue: {issue}9825[#9825])
-* Rename PipelineAggregatorBuilder to PipelineAggregationBuilder. {pull}18677[#18677] (issue: {issue}18377[#18377])
-* AggregatorBuilder and PipelineAggregatorBuilder do not need generics. {pull}18368[#18368] (issue: {issue}18133[#18133])
-
-Allocation::
-* Allow `_shrink` to N shards if source shards is a multiple of N {pull}18699[#18699]
-* Only filter initial recovery (post API) when shrinking an index {pull}18661[#18661]
-* Estimate shard size for shrunken indices {pull}18659[#18659]
-* Only fail relocation target shard if failing source shard is a primary {pull}18574[#18574] (issue: {issue}16144[#16144])
-* Simplify delayed shard allocation {pull}18351[#18351] (issue: {issue}18293[#18293])
-
-Analysis::
-* Add a MultiTermAwareComponent marker interface to analysis factories. {pull}19028[#19028] (issues: {issue}18064[#18064], {issue}9978[#9978])
-* Add Flags Parameter for Char Filter {pull}18363[#18363] (issue: {issue}18362[#18362])
-
-Cache::
-* Cache FieldStats in the request cache {pull}18768[#18768] (issue: {issue}18717[#18717])
-
-Cluster::
-* Index creation does not cause the cluster health to go RED {pull}18737[#18737] (issues: {issue}9106[#9106], {issue}9126[#9126])
-* Cluster Health class improvements {pull}18673[#18673]
-
-Core::
-* Read Elasticsearch manifest via URL {pull}18999[#18999] (issue: {issue}18996[#18996])
-* Throw if the local node is not set {pull}18963[#18963] (issue: {issue}18962[#18962])
-* Improve performance of applyDeletedShards {pull}18788[#18788] (issue: {issue}18776[#18776])
-* Bootstrap check for OnOutOfMemoryError and seccomp {pull}18756[#18756] (issue: {issue}18736[#18736])
-
-Dates::
-* Improve TimeZoneRoundingTests error messages {pull}18895[#18895]
-* Improve TimeUnitRounding for edge cases and DST transitions {pull}18589[#18589]
-
-Expressions::
-* improve date api for expressions/painless fields {pull}18658[#18658]
-
-Index APIs::
-* Add Shrink request source parser to parse create index request body {pull}18802[#18802]
-
-Index Templates::
-* Parse and validate mappings on index template creation {pull}8802[#8802] (issue: {issue}2415[#2415])
-
-Ingest::
-* Add `ignore_failure` option to all ingest processors {pull}18650[#18650] (issue: {issue}18493[#18493])
-* new ScriptProcessor for Ingest {pull}18193[#18193]
-
-Internal::
-* Hot methods redux {pull}19016[#19016] (issue: {issue}16725[#16725])
-* Remove forked joda time BaseDateTime class {pull}18953[#18953]
-* Support optional ctor args in ConstructingObjectParser {pull}18725[#18725]
-* Remove thread pool from page cache recycler {pull}18664[#18664] (issue: {issue}18613[#18613])
-
-Java API::
-* Switch QueryBuilders to new MatchPhraseQueryBuilder {pull}18753[#18753]
-
-Logging::
-* Throw IllegalStateException when handshake fails due to version or cluster mismatch {pull}18676[#18676]
-
-Mapping::
-* Upgrade `string` fields to `text`/`keyword` even if `include_in_all` is set. {pull}19004[#19004] (issue: {issue}18974[#18974])
-
-Network::
-* Exclude admin / diagnostic requests from HTTP request limiting {pull}18833[#18833] (issues: {issue}17951[#17951], {issue}18145[#18145])
-* Do not start scheduled pings until transport start {pull}18702[#18702]
-
-Packaging::
-* Remove explicit parallel new GC flag {pull}18767[#18767]
-* Use JAVA_HOME or java.exe in PATH like the Linux scripts do {pull}18685[#18685] (issue: {issue}4913[#4913])
-
-Percolator::
-* Add percolator query extraction support for dismax query {pull}18845[#18845]
-* Improve percolate query performance by not verifying certain candidate matches {pull}18696[#18696]
-* Improve percolator query term extraction {pull}18610[#18610]
-
-Plugin Lang Painless::
-* Painless Initializers {pull}19012[#19012]
-* Add augmentation {pull}19003[#19003]
-* Infer lambda arguments/return type {pull}18983[#18983]
-* Fix explicit casts and improve tests. {pull}18958[#18958]
-* Add lambda captures {pull}18954[#18954]
-* improve Debugger to print code even if it hits an exception {pull}18932[#18932] (issue: {issue}1[#1])
-* Move semicolon hack into lexer {pull}18931[#18931]
-* Add flag support to regexes {pull}18927[#18927]
-* improve lambda syntax (allow single expression) {pull}18924[#18924]
-* Remove useless dropArguments in megamorphic cache {pull}18913[#18913]
-* non-capturing lambda support {pull}18911[#18911] (issue: {issue}18824[#18824])
-* fix bugs in operators and more improvements for the dynamic case {pull}18899[#18899]
-* improve unary operators and cleanup tests {pull}18867[#18867] (issue: {issue}18849[#18849])
-* Add support for the find operator (=~) and the match operator (==~) {pull}18858[#18858]
-* Remove casts and boxing for dynamic math {pull}18849[#18849] (issue: {issue}18847[#18847])
-* Refactor def math {pull}18847[#18847]
-* Add support for /regex/ {pull}18842[#18842]
-* Array constructor references {pull}18831[#18831]
-* Method references to user functions {pull}18828[#18828]
-* Add } as a delimiter. {pull}18827[#18827] (issue: {issue}18821[#18821])
-* Add Lambda Stub Node {pull}18824[#18824]
-* Add capturing method references {pull}18818[#18818] (issue: {issue}18748[#18748])
-* Add Functions to Painless {pull}18810[#18810]
-* Add Method to Get New MethodWriters {pull}18771[#18771]
-* Static For Each {pull}18757[#18757]
-* Method reference support {pull}18748[#18748] (issue: {issue}18578[#18578])
-* Add support for the new Java 9 MethodHandles#arrayLength() factory {pull}18734[#18734]
-* Improve painless compile-time exceptions {pull}18711[#18711] (issue: {issue}18600[#18600])
-* add java.time packages to painless whitelist {pull}18621[#18621]
-* Add Function Reference Stub to Painless {pull}18578[#18578]
-
-Plugins::
-* Add did-you-mean for plugin cli {pull}18942[#18942] (issue: {issue}18896[#18896])
-* Plugins: Remove name() and description() from api {pull}18906[#18906]
-* Emit nicer error message when trying to install unknown plugin {pull}18876[#18876] (issue: {issue}17226[#17226])
-
-Query DSL::
-* Treat zero token in `common` terms query as MatchNoDocsQuery {pull}18656[#18656]
-* Handle empty query bodies at parse time and remove EmptyQueryBuilder {pull}17624[#17624] (issues: {issue}17540[#17540], {issue}17541[#17541])
-
-REST::
-* Adding status field in _msearch error request bodies {pull}18586[#18586] (issue: {issue}18013[#18013])
-
-Recovery::
-* index shard should be able to cancel check index on close. {pull}18839[#18839] (issue: {issue}12011[#12011])
-
-Reindex API::
-* Implement ctx.op = "delete" on _update_by_query and _reindex {pull}18614[#18614] (issue: {issue}18043[#18043])
-
-Scripting::
-* Compile each Groovy script in its own classloader {pull}18918[#18918] (issue: {issue}18572[#18572])
-* Include script field even if its value is null {pull}18384[#18384] (issue: {issue}16408[#16408])
-
-Scroll::
-* Add an index setting to limit the maximum number of slices allowed in a scroll request. {pull}18782[#18782]
-
-Search::
-* Change default similarity to BM25 {pull}18948[#18948] (issue: {issue}18944[#18944])
-* Add a parameter to cap the number of searches the msearch api will concurrently execute {pull}18721[#18721]
-
-Sequence IDs::
-* Persist sequence number checkpoints {pull}18949[#18949] (issue: {issue}10708[#10708])
-* Add sequence numbers to cat shards API {pull}18772[#18772]
-
-Settings::
-* Improve error message if a setting is not found {pull}18920[#18920] (issue: {issue}18663[#18663])
-* Cleanup placeholder replacement {pull}17335[#17335]
-
-Snapshot/Restore::
-* Adds UUIDs to snapshots {pull}18228[#18228] (issue: {issue}18156[#18156])
-* Clarify the semantics of the BlobContainer interface {pull}18157[#18157] (issue: {issue}15580[#15580])
-
-Stats::
-* Add total_indexing_buffer/_in_bytes to nodes info API {pull}18914[#18914] (issue: {issue}18651[#18651])
-* Allow FieldStatsRequest to disable cache {pull}18900[#18900]
-* Remove index_writer_max_memory stat from segment stats {pull}18651[#18651] (issues: {issue}14121[#14121], {issue}7440[#7440])
-* Move DocStats under Engine to get more accurate numbers {pull}18587[#18587]
-
-Task Manager::
-* Fetch result when wait_for_completion {pull}18905[#18905]
-* Create get task API that falls back to the .tasks index {pull}18682[#18682]
-* Add ability to store results for long running tasks {pull}17928[#17928]
-
-Translog::
-* Beef up Translog testing with random channel exceptions {pull}18997[#18997]
-* Do not replay into translog on local recovery {pull}18547[#18547]
-
-
-
-[[bug-5.0.0-alpha4]]
-[float]
-=== Bug fixes
-
-Allocation::
-* Fix recovery throttling to properly handle relocating non-primary shards {pull}18701[#18701] (issue: {issue}18640[#18640])
-
-CAT API::
-* Fix merge stats rendering in RestIndicesAction {pull}18720[#18720]
-
-CRUD::
-* Squash a race condition in RefreshListeners {pull}18806[#18806]
-
-Circuit Breakers::
-* Never trip circuit breaker in liveness request {pull}18627[#18627] (issue: {issue}17951[#17951])
-
-Cluster::
-* Fix block checks when no indices are specified {pull}19047[#19047] (issue: {issue}8105[#8105])
-* Acknowledge index deletion requests based on standard cluster state acknowledgment {pull}18602[#18602] (issues: {issue}16442[#16442], {issue}18558[#18558])
-
-Core::
-* Throw exception if using a closed transport client {pull}18722[#18722] (issue: {issue}18708[#18708])
-
-Dates::
-* Fix invalid rounding value for TimeIntervalRounding close to DST transitions {pull}18800[#18800]
-* Fix problem with TimeIntervalRounding on DST end {pull}18780[#18780]
-
-Expressions::
-* replace ScriptException with a better one {pull}18600[#18600]
-
-Ingest::
-* Fix ignore_failure behavior in _simulate?verbose and more cleanup {pull}18987[#18987]
-
-Internal::
-* Fix filtering of node ids for TransportNodesAction {pull}18634[#18634] (issue: {issue}18618[#18618])
-
-Mapping::
-* Better error message when mapping configures null {pull}18809[#18809] (issue: {issue}18803[#18803])
-* Process dynamic templates in order. {pull}18638[#18638] (issues: {issue}18625[#18625], {issue}2401[#2401])
-
-Packaging::
-* Remove extra bin/ directory in bin folder {pull}18630[#18630]
-
-Plugin Lang Painless::
-* Fix compound assignment with string concats {pull}18933[#18933] (issue: {issue}18929[#18929])
-* Fix horrible capture {pull}18907[#18907] (issue: {issue}18899[#18899])
-* Fix Casting Bug {pull}18871[#18871]
-
-Query DSL::
-* Make parsing of bool queries stricter {pull}19052[#19052] (issue: {issue}19034[#19034])
-
-REST::
-* Get XContent params from request in Nodes rest actions {pull}18860[#18860] (issue: {issue}18794[#18794])
-
-Reindex API::
-* Fix a race condition in reindex's rethrottle {pull}18731[#18731] (issue: {issue}18744[#18744])
-
-Search::
-* Require timeout units when parsing query body {pull}19077[#19077] (issue: {issue}19075[#19075])
-* Close SearchContext if query rewrite failed {pull}18727[#18727]
-
-Settings::
-* Register "cloud.node.auto_attributes" setting in EC2 discovery plugin {pull}18678[#18678]
-
-Snapshot/Restore::
-* Better handling of an empty shard's segments_N file {pull}18784[#18784] (issue: {issue}18707[#18707])
-
-Stats::
-* Fix sync flush total shards statistics {pull}18766[#18766]
-
-Translog::
-* Fix translog replay of multiple operations on the same doc {pull}18611[#18611] (issues: {issue}18547[#18547], {issue}18623[#18623])
-
-
-
-[[upgrade-5.0.0-alpha4]]
-[float]
-=== Upgrades
-
-Core::
-* Upgrade to Lucene 6.1.0. {pull}18926[#18926]
-* Upgrade to lucene-6.1.0-snapshot-3a57bea. {pull}18786[#18786]
-* Upgrade to Lucene 6.0.1. {pull}18648[#18648] (issues: {issue}17535[#17535], {issue}28[#28])
-
-Dates::
-* Upgrade joda-time to 2.9.4 {pull}18609[#18609] (issues: {issue}14524[#14524], {issue}18017[#18017])
-
-Packaging::
-* Upgrade JNA to 4.2.2 and remove optionality {pull}19045[#19045] (issue: {issue}13245[#13245])
-
-Plugin Discovery EC2::
-* Update aws sdk to 1.10.69 and add use_throttle_retries repository setting {pull}17784[#17784] (issues: {issue}538[#538], {issue}586[#586], {issue}589[#589])
-
-
-
diff --git a/docs/reference/release-notes/5.0.0-alpha5.asciidoc b/docs/reference/release-notes/5.0.0-alpha5.asciidoc
deleted file mode 100644
index 9a0605ea4f..0000000000
--- a/docs/reference/release-notes/5.0.0-alpha5.asciidoc
+++ /dev/null
@@ -1,463 +0,0 @@
-[[release-notes-5.0.0-alpha5]]
-== 5.0.0-alpha5 Release Notes
-
-Also see <<breaking-changes-5.0>>.
-
-IMPORTANT: This is an alpha release and is intended for _testing purposes only_. Indices created in this version will *not be compatible with Elasticsearch 5.0.0 GA*. Upgrading 5.0.0-alpha5 to any other version is not supported.
-
-[IMPORTANT]
-.Known networking bug in 5.0.0-alpha5
-======================================================
-
-There is a major bug in the new Netty4 implementation in this release which
-affects any REST requests greater than 1024 bytes in size, and which will
-generate an exception similar to the following:
-
-[source,txt]
-----
-[WARN ][http.netty4] [wtOV9Vb] caught exception while handling client http traffic, closing connection [id: 0x1320b717, L:/0:0:0:0:0:0:0:1:9200 - R:/0:0:0:0:0:0:0:1:54732]
-java.lang.UnsupportedOperationException: unsupported message type: DefaultFullHttpResponse (expected: ByteBuf, FileRegion)
-----
-
-This is due to incorrect handling of the `Expect` HTTP header, and it can be
-worked around in one of three ways:
-
-* Use a client which does not add `Expect` headers (including the official clients).
-
-* Pass a blank `Expect` header, e.g.
-+
-[source,sh]
-----
-curl -H 'Expect:' ...
-----
-
-* Use Netty3 for the HTTP layer by passing the following setting at startup:
-+
-[source,sh]
-----
-./bin/elasticsearch -Ehttp.type=netty3
-----
-
-======================================================
-
-[[breaking-5.0.0-alpha5]]
-[float]
-=== Breaking changes
-
-CAT API::
-* Improve cat thread pool API {pull}19721[#19721] (issue: {issue}19590[#19590])
-
-Cluster::
-* Persistent Node Ids {pull}19140[#19140] (issue: {issue}17811[#17811])
-
-Core::
-* Keep input time unit when parsing TimeValues {pull}19102[#19102]
-
-Exceptions::
-* Die with dignity {pull}19272[#19272] (issue: {issue}19231[#19231])
-
-Index APIs::
-* Removes write consistency level across replication action APIs in favor of wait_for_active_shards {pull}19454[#19454] (issue: {issue}18985[#18985])
-
-Scripting::
-* Remove deprecated 1.x script and template syntax {pull}19387[#19387] (issue: {issue}13729[#13729])
-
-Search::
-* Rename `fields` to `stored_fields` and add `docvalue_fields` {pull}18992[#18992] (issue: {issue}18943[#18943])
-
-Settings::
-* Persistent Node Names {pull}19456[#19456] (issue: {issue}19140[#19140])
-* Remove support for properties {pull}19398[#19398] (issues: {issue}19388[#19388], {issue}19391[#19391])
-
-
-
-[[breaking-java-5.0.0-alpha5]]
-[float]
-=== Breaking Java changes
-
-CRUD::
-* Removing isCreated and isFound from the Java API {pull}19645[#19645] (issues: {issue}19566[#19566], {issue}19631[#19631])
-
-Internal::
-* Clean up BytesReference {pull}19196[#19196]
-
-Java API::
-* Add a dedicated client/transport project for transport-client {pull}19435[#19435] (issue: {issue}19412[#19412])
-
-Network::
-* Factor out abstract TCPTransport* classes to reduce the netty footprint {pull}19096[#19096]
-
-Plugins::
-* Migrate query registration from push to pull {pull}19376[#19376]
-* Add components getter as bridge between guice and new plugin init world {pull}19371[#19371]
-* Remove CustomNodeAttributes extension point {pull}19348[#19348]
-* Add RepositoryPlugin interface for registering snapshot repositories {pull}19324[#19324]
-* Simplified repository api for snapshot/restore {pull}19292[#19292]
-* Switch most search extensions from push to pull {pull}19238[#19238]
-* Move RestHandler registration to ActionModule and ActionPlugin {pull}19165[#19165]
-* Pull actions from plugins {pull}19108[#19108]
-* Switch analysis from push to pull {pull}19073[#19073]
-* Remove guice from Mapper plugins {pull}19018[#19018]
-
-Scripting::
-* Remove o.e.script.Template class and move template query to lang-mustache module {pull}19425[#19425] (issue: {issue}16314[#16314])
-
-Settings::
-* Remove `node.mode` and `node.local` settings {pull}19428[#19428]
-
-Snapshot/Restore::
-* Removes extra writeBlob method in BlobContainer {pull}19727[#19727] (issue: {issue}18528[#18528])
-
-
-
-[[deprecation-5.0.0-alpha5]]
-[float]
-=== Deprecations
-
-CRUD::
-* Deprecate found and created in delete and index rest responses {pull}19633[#19633]
-
-Plugin Discovery Azure Classic::
-* Deprecate discovery-azure and rename it to discovery-azure-classic {pull}19186[#19186] (issue: {issue}19144[#19144])
-
-Templates::
-* Deprecate template query {pull}19607[#19607] (issue: {issue}19390[#19390])
-
-
-
-[[feature-5.0.0-alpha5]]
-[float]
-=== New features
-
-Aggregations::
-* Split regular histograms from date histograms. {pull}19551[#19551] (issues: {issue}4847[#4847], {issue}8082[#8082])
-
-Circuit Breakers::
-* Circuit break on aggregation bucket numbers with request breaker {pull}19394[#19394] (issue: {issue}14046[#14046])
-
-Ingest::
-* ingest-useragent plugin {pull}19074[#19074]
-
-Java REST Client::
-* Introduce async performRequest method {pull}19400[#19400]
-
-Mapping::
-* Add `scaled_float`. {pull}19264[#19264] (issues: {issue}15939[#15939], {issue}1941[#1941])
-
-Plugin Repository S3::
-* Add support for path_style_access {pull}15114[#15114]
-
-Reindex API::
-* Reindex from remote {pull}18585[#18585] (issue: {issue}17447[#17447])
-
-Translog::
-* Add `elasticsearch-translog` CLI tool with `truncate` command {pull}19342[#19342] (issue: {issue}19123[#19123])
-
-
-
-[[enhancement-5.0.0-alpha5]]
-[float]
-=== Enhancements
-
-Aggregations::
-* Make the heuristic to compute the default shard size less aggressive. {pull}19659[#19659]
-* Add _bucket_count option to buckets_path {pull}19571[#19571] (issue: {issue}19553[#19553])
-* Remove AggregationStreams {pull}19507[#19507]
-* Migrate serial_diff aggregation to NamedWriteable {pull}19483[#19483]
-* Migrate most remaining pipeline aggregations to NamedWriteable {pull}19480[#19480]
-* Migrate moving_avg pipeline aggregation to NamedWriteable {pull}19420[#19420]
-* Migrate matrix_stats to NamedWriteable {pull}19418[#19418]
-* Migrate derivative pipeline aggregation to NamedWriteable {pull}19407[#19407]
-* Migrate top_hits, histogram, and ip_range aggregations to NamedWriteable {pull}19375[#19375]
-* Migrate nested, reverse_nested, and children aggregations to NamedWriteable {pull}19374[#19374]
-* Migrate geohash_grid and geo_bounds aggregations to NamedWriteable {pull}19372[#19372]
-* Clean up significant terms aggregation results {pull}19365[#19365]
-* Migrate range, date_range, and geo_distance aggregations to NamedWriteable {pull}19321[#19321]
-* Migrate terms aggregation to NamedWriteable {pull}19277[#19277]
-* Migrate sampler and missing aggregations to NamedWriteable {pull}19259[#19259]
-* Migrate global, filter, and filters aggregation to NamedWriteable {pull}19220[#19220]
-* Migrate the cardinality, scripted_metric, and geo_centroid aggregations to NamedWriteable {pull}19219[#19219]
-* Use a static default precision for the cardinality aggregation. {pull}19215[#19215]
-* Migrate more aggregations to NamedWriteable {pull}19199[#19199]
-* Migrate stats and extended stats to NamedWriteable {pull}19198[#19198]
-* Migrate sum, min, and max aggregations over to NamedWriteable {pull}19194[#19194]
-* Start migration away from aggregation streams {pull}19097[#19097]
-
-Analysis::
-* Specify custom char_filters/tokenizer/token_filters in the analyze API {pull}15671[#15671] (issue: {issue}8878[#8878])
-
-CAT API::
-* Includes the index UUID in the _cat/indices API {pull}19204[#19204] (issue: {issue}19132[#19132])
-
-CRUD::
-* Renaming operation to result and reworking responses {pull}19704[#19704] (issue: {issue}19664[#19664])
-* Adding _operation field to index, update, delete response. {pull}19566[#19566] (issues: {issue}19267[#19267], {issue}9642[#9642], {issue}9736[#9736])
-
-Cache::
-* Enable option to use request cache for size > 0 {pull}19472[#19472]
-
-Cluster::
-* Batch process node left and node failure {pull}19289[#19289] (issue: {issue}19282[#19282])
-* Index creation waits for write consistency shards {pull}18985[#18985]
-* Inline reroute with process of node join/master election {pull}18938[#18938] (issue: {issue}17270[#17270])
-
-Core::
-* Makes index creation more friendly {pull}19450[#19450] (issue: {issue}9126[#9126])
-* Clearer error when handling fractional time values {pull}19158[#19158] (issue: {issue}19102[#19102])
-
-Discovery::
-* Do not log cluster service errors after joining a master {pull}19705[#19705]
-
-Exceptions::
-* Make NotMasterException a first class citizen {pull}19385[#19385]
-* Do not catch throwable {pull}19231[#19231]
-
-Geo::
-* GeoBoundingBoxQueryBuilder should throw IAE when topLeft and bottomRight are the same coordinate {pull}18668[#18668] (issues: {issue}18458[#18458], {issue}18631[#18631])
-
-Ingest::
-* Add REST _ingest/pipeline to get all pipelines {pull}19603[#19603] (issue: {issue}19585[#19585])
-* Show ignored errors in verbose simulate result {pull}19404[#19404] (issue: {issue}19319[#19319])
-* update foreach processor to only support one applied processor. {pull}19402[#19402] (issue: {issue}19345[#19345])
-* Skip the execution of an empty pipeline {pull}19200[#19200] (issue: {issue}19192[#19192])
-
-Internal::
-* Make Priority an enum {pull}19448[#19448]
-* Snapshot UUIDs in blob names {pull}19421[#19421] (issues: {issue}18156[#18156], {issue}18815[#18815], {issue}19002[#19002], {issue}7540[#7540])
-* Add RestController method for deprecating in one step {pull}19343[#19343]
-* Tighten ensure atomic move cleanup {pull}19309[#19309] (issue: {issue}19036[#19036])
-* Enable checkstyle ModifierOrder {pull}19214[#19214]
-* Expose task information from NodeClient {pull}19189[#19189]
-* Changed rest handler interface to take NodeClient {pull}19170[#19170]
-* Deprecate ExceptionsHelper.detailedMessage {pull}19160[#19160] (issue: {issue}19069[#19069])
-* Factor out ChannelBuffer from BytesReference {pull}19129[#19129]
-* Cleanup Compressor interface {pull}19125[#19125]
-* Require executor name when calling scheduleWithFixedDelay {pull}17538[#17538]
-
-Java API::
-* Start from a random node number so that clients do not overload the first node configured {pull}14143[#14143]
-
-Java REST Client::
-* Add response body to ResponseException error message {pull}19653[#19653] (issue: {issue}19653[#19653])
-* Simplify Sniffer initialization and automatically create the default HostsSniffer {pull}19599[#19599]
-* Remove duplicate dependency declaration for http client {pull}19580[#19580] (issue: {issue}19281[#19281])
-* Add callback to customize http client settings {pull}19373[#19373]
-* Rest Client: add short performRequest method variants without params and/or body {pull}19340[#19340] (issue: {issue}19312[#19312])
-
-Logging::
-* Add log message about enforcing bootstrap checks {pull}19451[#19451]
-* Improve logging for batched cluster state updates {pull}19255[#19255]
-* Send HTTP Warning Header(s) for any Deprecation Usage from a REST request {pull}17804[#17804] (issue: {issue}17687[#17687])
-
-Mapping::
-* Elasticsearch should reject dynamic templates with unknown `match_mapping_type`. {pull}17285[#17285] (issue: {issue}16945[#16945])
-
-Network::
-* Explicitly tell Netty to not use unsafe {pull}19786[#19786] (issues: {issue}19562[#19562], {issue}5624[#5624])
-* Enable Netty 4 extensions {pull}19767[#19767] (issue: {issue}19526[#19526])
-* Modularize netty {pull}19392[#19392]
-* Simplify TcpTransport interface by reducing send code to a single send method {pull}19223[#19223]
-
-Percolator::
-* Also support query term extract for queries wrapped inside a FunctionScoreQuery {pull}19184[#19184]
-* Add support for synonym query to percolator query term extraction {pull}19066[#19066]
-
-Plugin Discovery EC2::
-* Use `DefaultAWSCredentialsProviderChain` AWS SDK class for credentials {pull}19561[#19561] (issue: {issue}19556[#19556])
-* Support new Asia Pacific (Mumbai) ap-south-1 AWS region {pull}19112[#19112] (issue: {issue}19110[#19110])
-
-Plugin Discovery GCE::
-* Allow `_gce_` network when not using discovery gce {pull}15765[#15765] (issue: {issue}15724[#15724])
-
-Plugin Lang Painless::
-* Change Painless Tree Structure for Variable/Method Chains {pull}19459[#19459]
-* Add replaceAll and replaceFirst {pull}19070[#19070]
-
-Plugin Mapper Size::
-* Add doc values support to the _size field in the mapper-size plugin {pull}19217[#19217] (issue: {issue}18334[#18334])
-
-Plugins::
-* Add ScriptService to dependencies available for plugin components {pull}19770[#19770]
-* Log one plugin info per line {pull}19441[#19441]
-* Make rest headers registration pull based {pull}19440[#19440]
-* Add resource watcher to services available for plugin components {pull}19401[#19401]
-* Add some basic services to createComponents for plugins {pull}19380[#19380]
-* Make plugins closeable {pull}19137[#19137]
-* Plugins: Add status bar on download {pull}18695[#18695]
-
-Query DSL::
-* Allow empty json object in request body in `_count` API {pull}19595[#19595] (issue: {issue}19422[#19422])
-
-REST::
-* Add Location header to the index, update, and create APIs {pull}19509[#19509] (issue: {issue}19079[#19079])
-* Add support for `wait_for_events` to the `_cluster/health` REST endpoint {pull}19432[#19432] (issue: {issue}19419[#19419])
-* Rename Search Template REST spec names {pull}19178[#19178]
-
-Recovery::
-* Non-blocking primary relocation hand-off {pull}19013[#19013] (issues: {issue}15900[#15900], {issue}18553[#18553])
-
-Reindex API::
-* Only ask for `_version` when we need it {pull}19693[#19693] (issue: {issue}19135[#19135])
-* Use fewer threads when reindexing-from-remote {pull}19636[#19636]
-* Support authentication with reindex-from-remote {pull}19310[#19310]
-* Support requests_per_second=-1 to mean no throttling in reindex {pull}19101[#19101] (issue: {issue}19089[#19089])
-
-Scripting::
-* Remove ClusterState from compile api {pull}19136[#19136]
-* Mustache: Render Map as JSON {pull}18856[#18856] (issue: {issue}18970[#18970])
-
-Search::
-* Limit batch size when scrolling {pull}19367[#19367] (issue: {issue}19249[#19249])
-* Record method counts while profiling query components {pull}18302[#18302]
-
-Settings::
-* Validates new dynamic settings from the current state {pull}19122[#19122] (issue: {issue}19046[#19046])
-
-Snapshot/Restore::
-* BlobContainer#writeBlob no longer can overwrite a blob {pull}19749[#19749] (issue: {issue}15579[#15579])
-* More resilient blob handling in snapshot repositories {pull}19706[#19706] (issues: {issue}18156[#18156], {issue}18815[#18815], {issue}19421[#19421], {issue}7540[#7540])
-* Adding repository index generational files {pull}19002[#19002] (issue: {issue}18156[#18156])
-* Raised IOException on deleteBlob {pull}18815[#18815] (issue: {issue}18530[#18530])
-
-Stats::
-* Add missing field type in the FieldStats response. {pull}19241[#19241] (issue: {issue}17750[#17750])
-* Expose the ClusterInfo object in the allocation explain output {pull}19106[#19106] (issue: {issue}14405[#14405])
-
-
-
-[[bug-5.0.0-alpha5]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Undeprecates `aggs` in the search request {pull}19674[#19674] (issue: {issue}19504[#19504])
-* Change how `nested` and `reverse_nested` aggs know about their nested depth level {pull}19550[#19550] (issues: {issue}11749[#11749], {issue}12410[#12410])
-* Make ExtendedBounds immutable {pull}19490[#19490] (issue: {issue}19481[#19481])
-* Fix potential AssertionError with include/exclude on terms aggregations. {pull}19252[#19252] (issue: {issue}18575[#18575])
-* Pass resolved extended bounds to unmapped histogram aggregator {pull}19085[#19085] (issue: {issue}19009[#19009])
-* Fix "key_as_string" for date histogram and epoch_millis/epoch_second format with time zone {pull}19043[#19043] (issue: {issue}19038[#19038])
-
-Allocation::
-* Fix NPE when initializing replica shard has no UnassignedInfo {pull}19491[#19491] (issue: {issue}19488[#19488])
-* Make shard store fetch less dependent on the current cluster state, both on master and non data nodes {pull}19044[#19044] (issue: {issue}18938[#18938])
-
-Analysis::
-* Fix analyzer alias processing {pull}19506[#19506] (issue: {issue}19163[#19163])
-
-CAT API::
-* Fixes cat tasks operation in detailed mode {pull}19759[#19759] (issue: {issue}19755[#19755])
-* Add index pattern wildcards support to _cat/shards {pull}19655[#19655] (issue: {issue}19634[#19634])
-
-Cluster::
-* Allow routing table to be filtered by index pattern {pull}19688[#19688]
-* Use executor's describeTasks method to log task information in cluster service {pull}19531[#19531]
-
-Core::
-* Makes `m` case sensitive in TimeValue {pull}19649[#19649] (issue: {issue}19619[#19619])
-* Guard against negative result from FileStore.getUsableSpace when picking data path for a new shard {pull}19554[#19554]
-* Handle rejected execution exception on reschedule {pull}19505[#19505]
-
-Dates::
-* Make sure TimeIntervalRounding is monotonic for increasing dates {pull}19020[#19020]
-
-Geo::
-* Incomplete results when using geo_distance for large distances {pull}17578[#17578]
-
-Highlighting::
-* Plain highlighter should ignore parent/child queries {pull}19616[#19616] (issue: {issue}14999[#14999])
-* Let fast vector highlighter also extract terms from the nested query's inner query. {pull}19337[#19337] (issue: {issue}19265[#19265])
-
-Index APIs::
-* Fixes active shard count check in the case of `all` shards {pull}19760[#19760]
-* Add zero-padding to auto-generated rollover index name increment {pull}19610[#19610] (issue: {issue}19484[#19484])
-
-Ingest::
-* Fix NPE when simulating a pipeline with no id {pull}19650[#19650]
-* Change foreach processor to use ingest metadata for array element {pull}19609[#19609] (issue: {issue}19592[#19592])
-* No other processors should be executed after on_failure is called {pull}19545[#19545]
-* rethrow script compilation exceptions into ingest configuration exceptions {pull}19318[#19318]
-* Rename from `ingest-useragent` plugin to `ingest-user-agent` and its processor from `useragent` to `user_agent` {pull}19261[#19261]
-
-Inner Hits::
-* Ensure that InnerHitBuilder uses rewritten queries {pull}19360[#19360] (issue: {issue}19353[#19353])
-
-Internal::
-* Priority values should be unmodifiable {pull}19447[#19447]
-* Extract AbstractBytesReferenceTestCase {pull}19141[#19141]
-
-Java REST Client::
-* Rest Client: add slash to log line when missing between host and uri {pull}19325[#19325] (issue: {issue}19314[#19314])
-* Rest Client: HostsSniffer to set http as default scheme {pull}19306[#19306]
-
-Logging::
-* Only log running out of slots when out of slots {pull}19637[#19637]
-
-Mapping::
-* Mappings: Fix detection of metadata fields in documents {pull}19765[#19765]
-* Fix not_analyzed string fields to error when position_increment_gap is set {pull}19510[#19510]
-* Automatically created indices should honor `index.mapper.dynamic`. {pull}19478[#19478] (issue: {issue}17592[#17592])
-
-Network::
-* Verify lower level transport exceptions don't bubble up on disconnects {pull}19518[#19518] (issue: {issue}19096[#19096])
-
-Packaging::
-* Disable service in pre-uninstall {pull}19328[#19328]
-
-Parent/Child::
-* Make sure that no `_parent#null` gets introduced as the default _parent mapping {pull}19470[#19470] (issue: {issue}19389[#19389])
-
-Plugin Discovery Azure Classic::
-* Make discovery-azure plugin work again {pull}19062[#19062] (issues: {issue}15630[#15630], {issue}18637[#18637])
-
-Plugin Discovery EC2::
-* Fix EC2 discovery settings {pull}18690[#18690] (issues: {issue}18652[#18652], {issue}18662[#18662])
-
-Plugin Discovery GCE::
-* Fix NPE when GCE region is empty {pull}19176[#19176] (issue: {issue}16967[#16967])
-
-Plugin Repository Azure::
-* Register group setting for repository-azure accounts {pull}19086[#19086]
-
-Plugin Repository S3::
-* Add missing permission to repository-s3 {pull}19128[#19128] (issues: {issue}18539[#18539], {issue}19121[#19121])
-* Fix repository S3 Settings and add more tests {pull}18703[#18703] (issues: {issue}18662[#18662], {issue}18690[#18690])
-
-Query DSL::
-* Throw ParsingException if a query is wrapped in an array {pull}19750[#19750] (issue: {issue}12887[#12887])
-* Restore parameter name auto_generate_phrase_queries {pull}19514[#19514] (issue: {issue}19512[#19512])
-
-REST::
-* Fixes CORS handling so that it uses the defaults {pull}19522[#19522] (issue: {issue}19520[#19520])
-
-Recovery::
-* Move `reset recovery` into RecoveriesCollection {pull}19466[#19466] (issue: {issue}19473[#19473])
-* Fix replica-primary inconsistencies when indexing during primary relocation with ongoing replica recoveries {pull}19287[#19287] (issue: {issue}19248[#19248])
-
-Search::
-* Don't recursively count children profile timings {pull}19397[#19397] (issue: {issue}18693[#18693])
-* fix explain in function_score if no function filter matches {pull}19185[#19185]
-* Fix NPEs due to disabled source {pull}18957[#18957]
-
-Settings::
-* Validate settings against dynamic updaters on the master {pull}19088[#19088] (issue: {issue}19046[#19046])
-
-Stats::
-* Fix serialization bug in allocation explain API. {pull}19494[#19494]
-* Allocation explain: Also serialize `includeDiskInfo` field {pull}19492[#19492]
-
-Store::
-* Tighten up concurrent store metadata listing and engine writes {pull}19684[#19684] (issue: {issue}19416[#19416])
-* Make static Store access shard lock aware {pull}19416[#19416] (issue: {issue}18938[#18938])
-* Catch assertion errors on commit and turn them into real exceptions {pull}19357[#19357] (issue: {issue}19356[#19356])
-
-
-
-[[upgrade-5.0.0-alpha5]]
-[float]
-=== Upgrades
-
-Network::
-* Dependencies: Upgrade to netty 4.1.4 {pull}19689[#19689]
-* Introduce Netty 4 {pull}19526[#19526] (issue: {issue}3226[#3226])
-* Upgrade to netty 3.10.6.Final {pull}19235[#19235]
-
diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc
index a4de20ee21..61d807cc21 100644
--- a/docs/reference/search.asciidoc
+++ b/docs/reference/search.asciidoc
@@ -18,13 +18,14 @@ when indexing tweets, the routing value can be the user name:
[source,js]
--------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/tweet?routing=kimchy' -d '{
+POST /twitter/tweet?routing=kimchy
+{
"user" : "kimchy",
"postDate" : "2009-11-15T14:12:12",
"message" : "trying out Elasticsearch"
}
-'
--------------------------------------------------
+// CONSOLE
In such a case, if we want to search only on the tweets for a specific
user, we can specify it as the routing, resulting in the search hitting
@@ -32,7 +33,8 @@ only the relevant shard:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/tweet/_search?routing=kimchy' -d '{
+POST /twitter/tweet/_search?routing=kimchy
+{
"query": {
"bool" : {
"must" : {
@@ -46,8 +48,9 @@ $ curl -XGET 'http://localhost:9200/twitter/tweet/_search?routing=kimchy' -d '{
}
}
}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[continued]
The routing parameter can be multi-valued, represented as a comma-
separated string. This will result in hitting the relevant shards where
@@ -65,6 +68,7 @@ the request with two different groups:
[source,js]
--------------------------------------------------
+POST /_search
{
"query" : {
"match_all" : {}
@@ -72,6 +76,8 @@ the request with two different groups:
"stats" : ["group1", "group2"]
}
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
[float]
[[global-search-timeout]]
diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc
index 125f3124bf..558071eedc 100644
--- a/docs/reference/search/explain.asciidoc
+++ b/docs/reference/search/explain.asciidoc
@@ -15,35 +15,70 @@ Full query example:
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/twitter/tweet/1/_explain' -d '{
+GET /twitter/tweet/0/_explain
+{
"query" : {
- "term" : { "message" : "search" }
+ "match" : { "message" : "elasticsearch" }
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
This will yield the following result:
[source,js]
--------------------------------------------------
{
- "matches" : true,
+ "_index" : "twitter",
+ "_type" : "tweet",
+ "_id" : "0",
+ "matched" : true,
"explanation" : {
- "value" : 0.15342641,
- "description" : "fieldWeight(message:search in 0), product of:",
+ "value" : 1.55077,
+ "description" : "sum of:",
"details" : [ {
- "value" : 1.0,
- "description" : "tf(termFreq(message:search)=1)"
- }, {
- "value" : 0.30685282,
- "description" : "idf(docFreq=1, maxDocs=1)"
+ "value" : 1.55077,
+ "description" : "weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
+ "details" : [ {
+ "value" : 1.55077,
+ "description" : "score(doc=0,freq=1.0 = termFreq=1.0\n), product of:",
+ "details" : [ {
+ "value" : 1.3862944,
+ "description" : "idf(docFreq=1, docCount=5)",
+ "details" : [ ]
+ }, {
+ "value" : 1.1186441,
+ "description" : "tfNorm, computed from:",
+ "details" : [
+ { "value" : 1.0, "description" : "termFreq=1.0", "details" : [ ] },
+ { "value" : 1.2, "description" : "parameter k1", "details" : [ ] },
+ { "value" : 0.75, "description" : "parameter b", "details" : [ ] },
+ { "value" : 5.4, "description" : "avgFieldLength", "details" : [ ] },
+ { "value" : 4.0, "description" : "fieldLength", "details" : [ ] }
+ ]
+ } ]
+ } ]
}, {
- "value" : 0.5,
- "description" : "fieldNorm(field=message, doc=0)"
+ "value" : 0.0,
+ "description" : "match on required clause, product of:",
+ "details" : [ {
+ "value" : 0.0,
+ "description" : "# clause",
+ "details" : [ ]
+ }, {
+ "value" : 1.0,
+ "description" : "_type:tweet, product of:",
+ "details" : [
+ { "value" : 1.0, "description" : "boost", "details" : [ ] },
+ { "value" : 1.0, "description" : "queryNorm", "details" : [ ] }
+ ]
+ } ]
} ]
}
}
--------------------------------------------------
+// TESTRESPONSE
There is also a simpler way of specifying the query via the `q`
parameter. The specified `q` parameter value is then parsed as if the
@@ -52,8 +87,10 @@ explain api:
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/twitter/tweet/1/_explain?q=message:search'
+GET /twitter/tweet/0/_explain?q=message:search
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
This will yield the same result as the previous request.
@@ -66,7 +103,7 @@ This will yield the same result as the previous request.
Set to `true` to retrieve the `_source` of the document explained. You can also
retrieve part of the document by using `_source_include` & `_source_exclude` (see <<get-source-filtering,Get API>> for more details)
-`fields`::
+`stored_fields`::
Allows you to control which stored fields to return as part of the
document explained.
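
For example, a minimal sketch combining the `q` parameter with
`stored_fields` (assuming the same `twitter` test setup used above; the
`message` field is only returned if it is mapped as a stored field):

[source,js]
--------------------------------------------------
GET /twitter/tweet/0/_explain?q=message:search&stored_fields=message
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]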
diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc
index d32cd45d87..293fb530cf 100644
--- a/docs/reference/search/multi-search.asciidoc
+++ b/docs/reference/search/multi-search.asciidoc
@@ -80,3 +80,36 @@ This default is based on the number of data nodes and the default search thread
=== Security
See <<url-access-control>>
+
+[float]
+[[template-msearch]]
+=== Template support
+
+Much as described in <<search-template>> for the _search resource, _msearch
+also supports templates. Submit them as follows:
+
+[source,js]
+-----------------------------------------------
+$ cat requests
+{"index" : "main"}
+{ "inline" : "{ \"query\": { \"match_{{template}}\": {} } }", "params": { "template": "all" } }
+{"index" : "main"}
+{ "inline" : "{ \"query\": { \"match_{{template}}\": {} } }", "params": { "template": "all" } }
+
+$ curl -XGET localhost:9200/_msearch/template --data-binary @requests; echo
+-----------------------------------------------
+
+for inline templates. Alternatively, for stored templates:
+
+[source,js]
+-----------------------------------------------
+$ cat requests
+{"index" : "main"}
+{ "template": { "id": "template1" },"params": { "q": "foo" } }
+{"index" : "main"}
+{ "template": { "id": "template2" },"params": { "q": "bar" } }
+
+$ curl -XGET localhost:9200/_msearch/template --data-binary @requests; echo
+-----------------------------------------------
+
+
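+As an aside, the stored template ids referenced above (`template1`,
+`template2`) must already have been registered. A minimal registration sketch
+(the `title` field and `q` parameter are illustrative; see <<search-template>>
+for the full API):
+
+[source,js]
+-----------------------------------------------
+POST /_search/template/template1
+{
+    "template": {
+        "query": {
+            "match": {
+                "title": "{{q}}"
+            }
+        }
+    }
+}
+-----------------------------------------------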
diff --git a/docs/reference/search/percolate.asciidoc b/docs/reference/search/percolate.asciidoc
index 60e3e7e8ca..755b5f7a24 100644
--- a/docs/reference/search/percolate.asciidoc
+++ b/docs/reference/search/percolate.asciidoc
@@ -3,6 +3,4 @@
deprecated[5.0.0,Percolate and multi percolate APIs are deprecated and have been replaced by the new <<query-dsl-percolate-query,`percolate` query>>]
-added[5.0.0,Percolate query modifications only become visible after a refresh has occurred. Previously, they became visible immediately]
-
-added[5.0.0,For indices created on or after version 5.0.0-alpha1 the percolator automatically indexes the query terms with the percolator queries. This allows the percolator to percolate documents more quickly. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization]
+For indices created on or after version 5.0.0-alpha1, the percolator automatically indexes the query terms with the percolator queries. This allows the percolator to percolate documents more quickly. It is advisable to reindex any pre-5.0.0 indices to take advantage of this new optimization.
diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc
index e2c22caf6f..0a03b32285 100644
--- a/docs/reference/search/profile.asciidoc
+++ b/docs/reference/search/profile.asciidoc
@@ -17,13 +17,17 @@ Any `_search` request can be profiled by adding a top-level `profile` parameter:
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/_search' -d '{
+GET /_search
+{
"profile": true,<1>
"query" : {
- "match" : { "message" : "search test" }
+ "match" : { "message" : "message number" }
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
<1> Setting the top-level `profile` parameter to `true` will enable profiling
for the search
@@ -40,70 +44,70 @@ This will yield the following result:
"failed": 0
},
"hits": {
- "total": 3,
- "max_score": 1.078072,
- "hits": [ ... ] <1>
+ "total": 4,
+ "max_score": 0.5093388,
+ "hits": [...] <1>
},
"profile": {
"shards": [
{
- "id": "[2aE02wS1R8q_QFnYu6vDVQ][test][1]",
+ "id": "[2aE02wS1R8q_QFnYu6vDVQ][twitter][1]",
"searches": [
{
"query": [
{
"type": "BooleanQuery",
- "description": "message:search message:test",
- "time": "15.52889800ms",
+ "description": "message:message message:number",
+ "time": "1.873811000ms",
"breakdown": {
- "score": 6352,
- "score_count": 1,
- "build_scorer": 1800776,
+ "score": 51306,
+ "score_count": 4,
+ "build_scorer": 2935582,
"build_scorer_count": 1,
"match": 0,
"match_count": 0,
- "create_weight": 667400,
+ "create_weight": 919297,
"create_weight_count": 1,
- "next_doc": 10563,
- "next_doc_count": 2,
+ "next_doc": 53876,
+ "next_doc_count": 5,
"advance": 0,
"advance_count": 0
},
"children": [
{
"type": "TermQuery",
- "description": "message:search",
- "time": "4.938855000ms",
+ "description": "message:message",
+ "time": "0.3919430000ms",
"breakdown": {
- "score": 0,
- "score_count": 0,
- "build_scorer": 3230,
+ "score": 28776,
+ "score_count": 4,
+ "build_scorer": 784451,
"build_scorer_count": 1,
"match": 0,
"match_count": 0,
- "create_weight": 415612,
+ "create_weight": 1669564,
"create_weight_count": 1,
- "next_doc": 0,
- "next_doc_count": 0,
+ "next_doc": 10111,
+ "next_doc_count": 5,
"advance": 0,
"advance_count": 0
}
},
{
"type": "TermQuery",
- "description": "message:test",
- "time": "0.5016660000ms",
+ "description": "message:number",
+ "time": "0.2106820000ms",
"breakdown": {
- "score": 5014,
- "score_count": 1,
- "build_scorer": 1689333,
+ "score": 4552,
+ "score_count": 4,
+ "build_scorer": 42602,
"build_scorer_count": 1,
"match": 0,
"match_count": 0,
- "create_weight": 166587,
+ "create_weight": 89323,
"create_weight_count": 1,
- "next_doc": 5542,
- "next_doc_count": 2,
+ "next_doc": 2852,
+ "next_doc_count": 5,
"advance": 0,
"advance_count": 0
}
@@ -111,21 +115,44 @@ This will yield the following result:
]
}
],
- "rewrite_time": 870954,
+ "rewrite_time": 51443,
"collector": [
{
"name": "SimpleTopScoreDocCollector",
"reason": "search_top_hits",
- "time": "0.009783000000ms"
+ "time": "0.06989100000ms"
}
]
}
- ]
+ ],
+ "aggregations": []
}
]
}
}
--------------------------------------------------
+// TESTRESPONSE[s/"took": 25/"took": $body.took/]
+// TESTRESPONSE[s/"hits": \[...\]/"hits": $body.hits.hits/]
+// TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/]
+// TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/]
+// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/]
+// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/]
+// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/]
+// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/]
+// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/]
+// TESTRESPONSE[s/"time": "0.3919430000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.0.time/]
+// TESTRESPONSE[s/"score": 28776/"score": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.score/]
+// TESTRESPONSE[s/"build_scorer": 784451/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.build_scorer/]
+// TESTRESPONSE[s/"create_weight": 1669564/"create_weight": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.create_weight/]
+// TESTRESPONSE[s/"next_doc": 10111/"next_doc": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.next_doc/]
+// TESTRESPONSE[s/"time": "0.2106820000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.1.time/]
+// TESTRESPONSE[s/"score": 4552/"score": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.score/]
+// TESTRESPONSE[s/"build_scorer": 42602/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.build_scorer/]
+// TESTRESPONSE[s/"create_weight": 89323/"create_weight": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.create_weight/]
+// TESTRESPONSE[s/"next_doc": 2852/"next_doc": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.next_doc/]
+// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/]
+// Sorry for this mess....
+
<1> Search results are returned, but were omitted here for brevity
Even for a simple query, the response is relatively complicated. Let's break it down piece-by-piece before moving
@@ -139,11 +166,11 @@ First, the overall structure of the profile response is as follows:
"profile": {
"shards": [
{
- "id": "[2aE02wS1R8q_QFnYu6vDVQ][test][1]", <1>
+ "id": "[2aE02wS1R8q_QFnYu6vDVQ][twitter][1]", <1>
"searches": [
{
"query": [...], <2>
- "rewrite_time": 870954, <3>
+ "rewrite_time": 51443, <3>
"collector": [...] <4>
}
],
@@ -153,6 +180,12 @@ First, the overall structure of the profile response is as follows:
}
}
--------------------------------------------------
+// TESTRESPONSE[s/"profile": /"took": $body.took, "timed_out": $body.timed_out, "_shards": $body._shards, "hits": $body.hits, "profile": /]
+// TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/]
+// TESTRESPONSE[s/"query": \[...\]/"query": $body.profile.shards.0.searches.0.query/]
+// TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/]
+// TESTRESPONSE[s/"collector": \[...\]/"collector": $body.profile.shards.0.searches.0.collector/]
+// TESTRESPONSE[s/"aggregations": \[...\]/"aggregations": []/]
<1> A profile is returned for each shard that participated in the response, and is identified
by a unique ID
<2> Each profile contains a section which holds details about the query execution
@@ -195,33 +228,38 @@ the `advance` phase of that query is the cause, for example.
The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard.
The overall structure of this query tree will resemble your original Elasticsearch query, but may be slightly
(or sometimes very) different. It will also use similar but not always identical naming. Using our previous
-`term` query example, let's analyze the `query` section:
+`match` query example, let's analyze the `query` section:
[source,js]
--------------------------------------------------
"query": [
{
"type": "BooleanQuery",
- "description": "message:search message:test",
- "time": "15.52889800ms",
+ "description": "message:message message:number",
+ "time": "1.873811000ms",
"breakdown": {...}, <1>
"children": [
{
"type": "TermQuery",
- "description": "message:search",
- "time": "4.938855000ms",
+ "description": "message:message",
+ "time": "0.3919430000ms",
"breakdown": {...}
},
{
"type": "TermQuery",
- "description": "message:test",
- "time": "0.5016660000ms",
+ "description": "message:number",
+ "time": "0.2106820000ms",
"breakdown": {...}
}
]
}
]
--------------------------------------------------
+// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n/]
+// TESTRESPONSE[s/]$/],"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/]
+// TESTRESPONSE[s/"time": "1.873811000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.breakdown/]
+// TESTRESPONSE[s/"time": "0.3919430000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.0.breakdown/]
+// TESTRESPONSE[s/"time": "0.2106820000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.1.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.1.breakdown/]
<1> The breakdown timings are omitted for simplicity
Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two
@@ -245,20 +283,27 @@ The `"breakdown"` component lists detailed timing statistics about low-level Luc
[source,js]
--------------------------------------------------
"breakdown": {
- "score": 5014,
- "score_count": 1,
- "build_scorer": 1689333,
- "build_scorer_count": 1,
- "match": 0,
- "match_count": 0,
- "create_weight": 166587,
- "create_weight_count": 1,
- "next_doc": 5542,
- "next_doc_count": 2,
- "advance": 0,
- "advance_count": 0
+ "score": 51306,
+ "score_count": 4,
+ "build_scorer": 2935582,
+ "build_scorer_count": 1,
+ "match": 0,
+ "match_count": 0,
+ "create_weight": 919297,
+ "create_weight_count": 1,
+ "next_doc": 53876,
+ "next_doc_count": 5,
+ "advance": 0,
+ "advance_count": 0
}
--------------------------------------------------
+// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:message message:number",\n"time": $body.profile.shards.0.searches.0.query.0.time,/]
+// TESTRESPONSE[s/}$/},\n"children": $body.profile.shards.0.searches.0.query.0.children}],\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/]
+// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/]
+// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/]
+// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/]
+// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/]
+// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/]
Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall
`"time"` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is
@@ -348,10 +393,13 @@ Looking at the previous example:
{
"name": "SimpleTopScoreDocCollector",
"reason": "search_top_hits",
- "time": "2.206529000ms"
+ "time": "0.06989100000ms"
}
]
--------------------------------------------------
+// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": $body.profile.shards.0.searches.0.query,\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time,/]
+// TESTRESPONSE[s/]$/]}], "aggregations": []}]}}/]
+// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/]
We see a single collector named `SimpleTopScoreDocCollector`. This is the default "scoring and sorting" Collector
used by Elasticsearch. The `"reason"` field attempts to give a plain English description of the class name. The
@@ -473,6 +521,8 @@ GET /test/_search
}
}
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT test\n/]
This example has:
@@ -509,7 +559,7 @@ And the response:
"create_weight_count": 1,
"build_scorer": 377872,
"build_scorer_count": 1,
- "advance": 0
+ "advance": 0,
"advance_count": 0
}
},
@@ -528,7 +578,7 @@ And the response:
"create_weight_count": 1,
"build_scorer": 112551,
"build_scorer_count": 1,
- "advance": 0
+ "advance": 0,
"advance_count": 0
}
}
@@ -578,7 +628,7 @@ And the response:
"create_weight_count": 1,
"build_scorer": 38310,
"build_scorer_count": 1,
- "advance": 0
+ "advance": 0,
"advance_count": 0
}
}
@@ -640,7 +690,7 @@ the following example aggregations request:
[source,js]
--------------------------------------------------
-curl -XGET "http://localhost:9200/house-prices/_search" -d'
+GET /house-prices/_search
{
"profile": true,
"size": 0,
@@ -658,8 +708,10 @@ curl -XGET "http://localhost:9200/house-prices/_search" -d'
}
}
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT house-prices\n/]
Which yields the following aggregation profile output
diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc
index a9adc157bd..852710df70 100644
--- a/docs/reference/search/request-body.asciidoc
+++ b/docs/reference/search/request-body.asciidoc
@@ -7,41 +7,49 @@ example:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/tweet/_search' -d '{
+GET /twitter/tweet/_search
+{
"query" : {
"term" : { "user" : "kimchy" }
}
}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
And here is a sample response:
[source,js]
--------------------------------------------------
{
+ "took": 1,
+ "timed_out": false,
"_shards":{
- "total" : 5,
- "successful" : 5,
+ "total" : 1,
+ "successful" : 1,
"failed" : 0
},
"hits":{
"total" : 1,
+ "max_score": 1.3862944,
"hits" : [
{
"_index" : "twitter",
"_type" : "tweet",
- "_id" : "1",
+ "_id" : "0",
+ "_score": 1.3862944,
"_source" : {
"user" : "kimchy",
- "postDate" : "2009-11-15T14:12:12",
- "message" : "trying out Elasticsearch"
+ "message": "trying out Elasticsearch",
+ "date" : "2009-11-15T14:12:12",
+ "likes" : 0
}
}
]
}
}
--------------------------------------------------
+// TESTRESPONSE[s/"took": 1/"took": $body.took/]
[float]
=== Parameters
@@ -105,8 +113,10 @@ matching document was found (per shard).
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/_search?q=tag:wow&size=0&terminate_after=1'
+GET /_search?q=message:elasticsearch&size=0&terminate_after=1
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
The response will not contain any hits as the `size` was set to `0`. The
`hits.total` will be either equal to `0`, indicating that there were no
@@ -128,12 +138,12 @@ be set to `true` in the response.
},
"hits": {
"total": 1,
- "max_score": 0,
+ "max_score": 0.0,
"hits": []
}
}
--------------------------------------------------
-
+// TESTRESPONSE[s/"took": 3/"took": $body.took/]
include::request/query.asciidoc[]
diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc
index 488884ef8d..179587daaa 100644
--- a/docs/reference/search/request/rescore.asciidoc
+++ b/docs/reference/search/request/rescore.asciidoc
@@ -12,9 +12,11 @@ A `rescore` request is executed on each shard before it returns its
results to be sorted by the node handling the overall search request.
Currently the rescore API has only one implementation: the query
-rescorer, which uses a query to tweak the scoring. In the future,
+rescorer, which uses a query to tweak the scoring. In the future,
alternative rescorers may be made available, for example, a pair-wise rescorer.
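
For orientation, a minimal sketch of a query rescorer request (reusing the
`twitter` example data; the `window_size` of 50 is arbitrary):

[source,js]
--------------------------------------------------
GET /twitter/_search
{
  "query": {
    "match": { "message": "elasticsearch" }
  },
  "rescore": {
    "window_size": 50,
    "query": {
      "rescore_query": {
        "match_phrase": { "message": "trying out elasticsearch" }
      }
    }
  }
}
--------------------------------------------------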
+NOTE: the `rescore` phase is not executed when <<search-request-sort,`sort`>> is used.
+
NOTE: when exposing pagination to your users, you should not change
`window_size` as you step through each page (by passing different
`from` values) since that can alter the top hits causing results to
diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc
index a082bf3ba4..d924a56b65 100644
--- a/docs/reference/search/request/scroll.asciidoc
+++ b/docs/reference/search/request/scroll.asciidoc
@@ -175,7 +175,7 @@ curl -XDELETE localhost:9200/_search/scroll \
-d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1,aGVuRmV0Y2g7NTsxOnkxaDZ'
---------------------------------------
-
+[[sliced-scroll]]
==== Sliced Scroll
For scroll queries that return a lot of documents it is possible to split the scroll in multiple slices which
@@ -183,7 +183,7 @@ can be consumed independently:
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
+GET /twitter/tweet/_search?scroll=1m
{
"slice": {
"id": 0, <1>
@@ -195,9 +195,7 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
}
}
}
-'
-
-curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
+GET /twitter/tweet/_search?scroll=1m
{
"slice": {
"id": 1,
@@ -209,8 +207,9 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
}
}
}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[setup:big_twitter]
<1> The id of the slice
<2> The maximum number of slices
@@ -247,10 +246,10 @@ slice gets deterministic results.
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
+GET /twitter/tweet/_search?scroll=1m
{
"slice": {
- "field": "my_random_integer_field",
+ "field": "date",
"id": 0,
"max": 10
},
@@ -260,10 +259,11 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
}
}
}
-'
--------------------------------------------------
+// CONSOLE
+// TEST[setup:big_twitter]
For append-only time-based indices, the `timestamp` field can be used safely.
NOTE: By default the maximum number of slices allowed per scroll is limited to 1024.
-You can update the `index.max_slices_per_scroll` index setting to bypass this limit. \ No newline at end of file
+You can update the `index.max_slices_per_scroll` index setting to bypass this limit.
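
For example, a minimal sketch of raising the limit on the `twitter` index (the
value shown is arbitrary):

[source,js]
--------------------------------------------------
PUT /twitter/_settings
{
  "index.max_slices_per_scroll": 2048
}
--------------------------------------------------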
diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc
index 85c5d1e675..d3bbe283a2 100644
--- a/docs/reference/search/request/sort.asciidoc
+++ b/docs/reference/search/request/sort.asciidoc
@@ -351,9 +351,23 @@ Multiple geo points can be passed as an array containing any `geo_point` format,
[source,js]
--------------------------------------------------
-"pin.location" : [[-70, 40], [-71, 42]]
-"pin.location" : [{"lat": 40, "lon": -70}, {"lat": 42, "lon": -71}]
+GET /_search
+{
+ "sort" : [
+ {
+ "_geo_distance" : {
+ "pin.location" : [[-70, 40], [-71, 42]],
+ "order" : "asc",
+ "unit" : "km"
+ }
+ }
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
--------------------------------------------------
+// CONSOLE
and so forth.
diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc
index 03c02538a0..5f42b5422b 100644
--- a/docs/reference/search/request/source-filtering.asciidoc
+++ b/docs/reference/search/request/source-filtering.asciidoc
@@ -5,7 +5,7 @@
Allows to control how the `_source` field is returned with every hit.
By default operations return the contents of the `_source` field unless
-you have used the `fields` parameter or if the `_source` field is disabled.
+you have used the `stored_fields` parameter or if the `_source` field is disabled.
You can turn off `_source` retrieval by using the `_source` parameter:
diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc
index 3d5b8c01b4..da9a083062 100644
--- a/docs/reference/search/request/stored-fields.asciidoc
+++ b/docs/reference/search/request/stored-fields.asciidoc
@@ -1,4 +1,4 @@
-[[search-request-fields]]
+[[search-request-stored-fields]]
=== Fields
WARNING: The `stored_fields` parameter is about fields that are explicitly marked as
@@ -38,10 +38,7 @@ GET /_search
--------------------------------------------------
// CONSOLE
-
-For backwards compatibility, if the fields parameter specifies fields which are not stored (`store` mapping set to
-`false`), it will load the `_source` and extract it from it. This functionality has been replaced by the
-<<search-request-source-filtering,source filtering>> parameter.
+If the requested fields are not stored (`store` mapping set to `false`), they will be ignored.
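
As a sketch, a field must be explicitly stored in the mapping to be returned
this way (hypothetical index, type, and field names):

[source,js]
--------------------------------------------------
PUT /my-index
{
  "mappings": {
    "my-type": {
      "properties": {
        "title": { "type": "text", "store": true }
      }
    }
  }
}
--------------------------------------------------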
Field values fetched from the document itself are always returned as an array. Metadata fields like `_routing` and
`_parent` fields are never returned as an array.
@@ -53,3 +50,21 @@ Script fields can also be automatically detected and used as fields, so
things like `_source.obj1.field1` can be used, though not recommended, as
`obj1.field1` will work as well.
+==== Disable stored fields entirely
+
+To disable stored fields (and metadata fields) entirely, use `\_none_`:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+ "stored_fields": "_none_",
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+// CONSOLE
+
+NOTE: <<search-request-source-filtering,`_source`>> and <<search-request-version, `version`>> parameters cannot be activated if `_none_` is used.
+
diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc
index 718d47a986..c07c3755a2 100644
--- a/docs/reference/search/search-shards.asciidoc
+++ b/docs/reference/search/search-shards.asciidoc
@@ -14,29 +14,27 @@ Full example:
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/twitter/_search_shards'
+GET /twitter/_search_shards
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT twitter\n/]
This will yield the following result:
[source,js]
--------------------------------------------------
{
- "nodes": {
- "JklnKbD7Tyqi9TP3_Q_tBg": {
- "name": "Rl'nnd",
- "transport_address": "inet[/192.168.1.113:9300]"
- }
- },
+ "nodes": ...,
"shards": [
[
{
"index": "twitter",
"node": "JklnKbD7Tyqi9TP3_Q_tBg",
"primary": true,
- "relocating_node": null,
- "shard": 3,
- "state": "STARTED"
+ "shard": 0,
+ "state": "STARTED",
+ "allocation_id": {"id":"0TvkCyF7TAmM1wHP4a42-A"},
+ "relocating_node": null
}
],
[
@@ -44,9 +42,10 @@ This will yield the following result:
"index": "twitter",
"node": "JklnKbD7Tyqi9TP3_Q_tBg",
"primary": true,
- "relocating_node": null,
- "shard": 4,
- "state": "STARTED"
+ "shard": 1,
+ "state": "STARTED",
+ "allocation_id": {"id":"fMju3hd1QHWmWrIgFnI4Ww"},
+ "relocating_node": null
}
],
[
@@ -54,9 +53,10 @@ This will yield the following result:
"index": "twitter",
"node": "JklnKbD7Tyqi9TP3_Q_tBg",
"primary": true,
- "relocating_node": null,
- "shard": 0,
- "state": "STARTED"
+ "shard": 2,
+ "state": "STARTED",
+ "allocation_id": {"id":"Nwl0wbMBTHCWjEEbGYGapg"},
+ "relocating_node": null
}
],
[
@@ -64,9 +64,10 @@ This will yield the following result:
"index": "twitter",
"node": "JklnKbD7Tyqi9TP3_Q_tBg",
"primary": true,
- "relocating_node": null,
- "shard": 2,
- "state": "STARTED"
+ "shard": 3,
+ "state": "STARTED",
+ "allocation_id": {"id":"bU_KLGJISbW0RejwnwDPKw"},
+ "relocating_node": null
}
],
[
@@ -74,42 +75,48 @@ This will yield the following result:
"index": "twitter",
"node": "JklnKbD7Tyqi9TP3_Q_tBg",
"primary": true,
- "relocating_node": null,
- "shard": 1,
- "state": "STARTED"
+ "shard": 4,
+ "state": "STARTED",
+ "allocation_id": {"id":"DMs7_giNSwmdqVukF7UydA"},
+ "relocating_node": null
}
]
]
}
--------------------------------------------------
+// TESTRESPONSE[s/"nodes": ...,/"nodes": $body.nodes,/]
+// TESTRESPONSE[s/JklnKbD7Tyqi9TP3_Q_tBg/$body.shards.0.0.node/]
+// TESTRESPONSE[s/0TvkCyF7TAmM1wHP4a42-A/$body.shards.0.0.allocation_id.id/]
+// TESTRESPONSE[s/fMju3hd1QHWmWrIgFnI4Ww/$body.shards.1.0.allocation_id.id/]
+// TESTRESPONSE[s/Nwl0wbMBTHCWjEEbGYGapg/$body.shards.2.0.allocation_id.id/]
+// TESTRESPONSE[s/bU_KLGJISbW0RejwnwDPKw/$body.shards.3.0.allocation_id.id/]
+// TESTRESPONSE[s/DMs7_giNSwmdqVukF7UydA/$body.shards.4.0.allocation_id.id/]
And specifying the same request, this time with a routing value:
[source,js]
--------------------------------------------------
-curl -XGET 'localhost:9200/twitter/_search_shards?routing=foo,baz'
+GET /twitter/_search_shards?routing=foo,baz
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT twitter\n/]
This will yield the following result:
[source,js]
--------------------------------------------------
{
- "nodes": {
- "JklnKbD7Tyqi9TP3_Q_tBg": {
- "name": "Rl'nnd",
- "transport_address": "inet[/192.168.1.113:9300]"
- }
- },
+ "nodes": ...,
"shards": [
[
{
"index": "twitter",
"node": "JklnKbD7Tyqi9TP3_Q_tBg",
"primary": true,
- "relocating_node": null,
- "shard": 2,
- "state": "STARTED"
+ "shard": 0,
+ "state": "STARTED",
+ "allocation_id": {"id":"0TvkCyF7TAmM1wHP4a42-A"},
+ "relocating_node": null
}
],
[
@@ -117,14 +124,19 @@ This will yield the following result:
"index": "twitter",
"node": "JklnKbD7Tyqi9TP3_Q_tBg",
"primary": true,
- "relocating_node": null,
- "shard": 4,
- "state": "STARTED"
+ "shard": 1,
+ "state": "STARTED",
+ "allocation_id": {"id":"fMju3hd1QHWmWrIgFnI4Ww"},
+ "relocating_node": null
}
]
]
}
--------------------------------------------------
+// TESTRESPONSE[s/"nodes": ...,/"nodes": $body.nodes,/]
+// TESTRESPONSE[s/JklnKbD7Tyqi9TP3_Q_tBg/$body.shards.0.0.node/]
+// TESTRESPONSE[s/0TvkCyF7TAmM1wHP4a42-A/$body.shards.0.0.allocation_id.id/]
+// TESTRESPONSE[s/fMju3hd1QHWmWrIgFnI4Ww/$body.shards.1.0.allocation_id.id/]
This time the search will only be executed against two of the shards, because
routing values have been specified.
diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc
index 7ed6f2de8c..7c6a0390ea 100644
--- a/docs/reference/search/search-template.asciidoc
+++ b/docs/reference/search/search-template.asciidoc
@@ -56,7 +56,7 @@ GET /_search/template
[float]
===== Converting parameters to JSON
-The `{{toJson}}parameter{{/toJson}}` function can be used to convert parameters
+The `{{#toJson}}parameter{{/toJson}}` function can be used to convert parameters
like maps and array to their JSON representation:
[source,js]
@@ -486,6 +486,42 @@ GET /_render/template/<template_name>
}
------------------------------------------
+[float]
+===== Explain
+
+You can use the `explain` parameter when running a template:
+
+[source,js]
+------------------------------------------
+GET /_search/template
+{
+ "file": "my_template",
+ "params": {
+ "status": [ "pending", "published" ]
+ },
+ "explain": true
+}
+------------------------------------------
+
+
+[float]
+===== Profiling
+
+You can use the `profile` parameter when running a template:
+
+[source,js]
+------------------------------------------
+GET /_search/template
+{
+ "file": "my_template",
+ "params": {
+ "status": [ "pending", "published" ]
+ },
+ "profile": true
+}
+------------------------------------------
+
+
[[multi-search-template]]
== Multi Search Template
diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc
index 40118e7693..eccba57dee 100644
--- a/docs/reference/search/search.asciidoc
+++ b/docs/reference/search/search.asciidoc
@@ -17,38 +17,48 @@ twitter index:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/_search?q=user:kimchy'
+GET /twitter/_search?q=user:kimchy
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
We can also search within specific types:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/tweet,user/_search?q=user:kimchy'
+GET /twitter/tweet,user/_search?q=user:kimchy
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
We can also search all tweets with a certain tag across several indices
(for example, when each user has his own index):
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/kimchy,elasticsearch/tweet/_search?q=tag:wow'
+GET /kimchy,elasticsearch/tweet/_search?q=tag:wow
--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
Or we can search all tweets across all available indices using `_all`
placeholder:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/_all/tweet/_search?q=tag:wow'
+GET /_all/tweet/_search?q=tag:wow
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
Or even search across all indices and all types:
[source,js]
--------------------------------------------------
-$ curl -XGET 'http://localhost:9200/_search?q=tag:wow'
+GET /_search?q=tag:wow
--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
By default, Elasticsearch rejects search requests that would query more than
1000 shards. The reason is that such large numbers of shards make the job of
diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc
index 587cdf86bd..60065ce96b 100644
--- a/docs/reference/search/suggesters/completion-suggest.asciidoc
+++ b/docs/reference/search/suggesters/completion-suggest.asciidoc
@@ -194,26 +194,50 @@ returns this response:
--------------------------------------------------
// TESTRESPONSE
-The configured weight for a suggestion is returned as `_score`.
-The `text` field uses the `input` of your indexed suggestion.
-Suggestions are document oriented, the document source is
-returned in `_source`. <<search-request-source-filtering, source filtering>>
-parameters are supported for filtering the document source.
+
+IMPORTANT: The `_source` meta-field must be enabled (which is the default
+behavior) for `_source` to be returned with suggestions.
+
+The configured weight for a suggestion is returned as `_score`. The
+`text` field uses the `input` of your indexed suggestion. Suggestions
+return the full document `_source` by default. The size of the `_source`
+can impact performance due to disk fetch and network transport overhead.
+For best performance, filter out unnecessary fields from the `_source`
+using <<search-request-source-filtering, source filtering>> to minimize
+`_source` size. The following demonstrates an example completion query
+with source filtering:
+
+[source,js]
+--------------------------------------------------
+POST music/_suggest
+{
+ "_source": "completion.*",
+ "song-suggest" : {
+ "prefix" : "nir",
+ "completion" : {
+ "field" : "suggest"
+ }
+ }
+}
+--------------------------------------------------
The basic completion suggester query supports the following parameters:
`field`:: The name of the field on which to run the query (required).
`size`:: The number of suggestions to return (defaults to `5`).
-`payload`:: The name of the field or field name array to be returned
- as payload (defaults to no fields).
NOTE: The completion suggester considers all documents in the index.
See <<suggester-context>> for an explanation of how to query a subset of
documents instead.
-NOTE: Specifying `payload` fields will incur additional search performance
-hit. The `payload` fields are retrieved eagerly (single pass) for top
-suggestions at the shard level using field data or from doc values.
+NOTE: When a completion query spans more than one shard, the suggest is
+executed in two phases, where the last phase fetches the relevant documents
+from the shards. Because of this document fetch overhead, a completion request
+executed against a single shard is more performant than one that spans
+multiple shards. For the best completion performance, it is therefore
+recommended to index completions into a single-shard index. If shard size
+leads to high heap usage, however, it is still recommended to break the index
+into multiple shards instead of optimizing for completion performance.
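
For example, a minimal sketch of creating such a single-shard `music` index
(settings only; the suggest mappings from the examples above are omitted):

[source,js]
--------------------------------------------------
PUT /music
{
  "settings": {
    "index.number_of_shards": 1
  }
}
--------------------------------------------------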
[[fuzzy]]
==== Fuzzy queries
diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc
index fec167c7b8..66e19eaadb 100644
--- a/docs/reference/search/suggesters/context-suggest.asciidoc
+++ b/docs/reference/search/suggesters/context-suggest.asciidoc
@@ -71,6 +71,7 @@ PUT place_path_category
}
}
--------------------------------------------------
+// CONSOLE
// TESTSETUP
<1> Defines a `category` context named 'place_type' where the categories must be
sent with the suggestions.
@@ -219,7 +220,6 @@ at index time. At query time, suggestions can be filtered and boosted if they ar
a certain distance of a specified geo location.
Internally, geo points are encoded as geohashes with the specified precision.
-See <<geohash>> for more background details.
[float]
===== Geo Mapping
@@ -330,6 +330,7 @@ POST place/_suggest?pretty
}
}
--------------------------------------------------
+// CONSOLE
// TEST[continued]
<1> The context query filters for suggestions that fall under
the geo location represented by a geohash of '(43.662, -79.380)'
diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc
index 03cbb5af35..6c502421e4 100644
--- a/docs/reference/search/suggesters/phrase-suggest.asciidoc
+++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc
@@ -71,12 +71,12 @@ PUT test
}
}
}
-POST test/test
+POST test/test?refresh=true
{"title": "noble warriors"}
-POST test/test
+POST test/test?refresh=true
{"title": "nobel prize"}
-POST _refresh
--------------------------------------------------
+// CONSOLE
// TESTSETUP
Once you have the analyzers and mappings set up you can use the `phrase`
@@ -84,7 +84,7 @@ suggester in the same spot you'd use the `term` suggester:
[source,js]
--------------------------------------------------
-POST _suggest?pretty -d'
+POST _suggest
{
"text": "noble prize",
"simple_phrase": {
@@ -125,7 +125,7 @@ can contain misspellings (See parameter descriptions below).
"options" : [ {
"text" : "nobel prize",
"highlighted": "<em>nobel</em> prize",
- "score" : 0.40765354
+ "score" : 0.5962314
}]
}
]
@@ -314,7 +314,7 @@ The direct generators support the following parameters:
The suggest mode controls what suggestions are included on the suggestions
generated on each shard. All values other than `always` can be thought of
as an optimization to generate fewer suggestions to test on each shard and
- are not rechecked at when combining the suggestions generated on each
+ are not rechecked when combining the suggestions generated on each
shard. Thus `missing` will generate suggestions for terms on shards that do
not contain them even if other shards do contain them. Those should be
filtered out using `confidence`. Three possible values can be specified:
diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc
index 8a2730c014..5fb4ad9b7c 100644
--- a/docs/reference/search/validate.asciidoc
+++ b/docs/reference/search/validate.asciidoc
@@ -111,6 +111,7 @@ GET twitter/tweet/_validate/query?q=post_date:foo&explain=true
responds with:
+[source,js]
--------------------------------------------------
{
"valid" : false,
diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc
index 1da6136003..0f88d03401 100644
--- a/docs/reference/setup/bootstrap-checks.asciidoc
+++ b/docs/reference/setup/bootstrap-checks.asciidoc
@@ -74,36 +74,6 @@ have `memlock unlimited`). The memory lock check verifies that *if* the
able to lock the heap. To pass the memory lock check, you might have to
configure <<mlockall,`mlockall`>>.
-=== Minimum master nodes check
-
-Elasticsearch uses a single master for managing cluster state but
-enables there to be multiple master-eligible nodes for
-high-availability. In the case of a partition, master-eligible nodes on
-each side of the partition might be elected as the acting master without
-knowing that there is a master on the side of the partition. This can
-lead to divergent cluster states potentially leading to data loss when
-the partition is healed. This is the notion of a split brain and it is
-the worst thing that can happen to an Elasticsearch cluster. But by
-configuring
-<<minimum_master_nodes,`discovery.zen.minimum_master_nodes`>> to be
-equal to a quorum of master-eligible nodes, it is not possible for the
-cluster to suffer from split brain because during a network partition
-there can be at most one side of the partition that contains a quorum of
-master nodes. The minimum master nodes check enforces that you've set
-<<minimum_master_nodes,`discovery.zen.minimum_master_nodes`>>. To pass
-the minimum master nodes check, you must configure
-<<minimum_master_nodes,`discovery.zen.minimum_master_nodes`>>.
-
-NOTE: The minimum master nodes check does not enforce that you've
-configured <<minimum_master_nodes,`discovery.zen.minimum_master_nodes`>>
-correctly, only that you have it configured. Elasticsearch does log a
-warning message if it detects that
-<<minimum_master_nodes,`discovery.zen.minimum_master_nodes`>> is
-incorrectly configured based on the number of master-eligible nodes
-visible in the cluster state. Future versions of Elasticsearch will
-contain stricter enforcement of
-<<minimum_master_nodes,`discovery.zen.minimum_master_nodes`>>.
-
=== Maximum number of threads check
Elasticsearch executes requests by breaking the request down into stages
@@ -157,6 +127,20 @@ systems and operating systems, the server VM is the
default. Additionally, Elasticsearch is configured by default to force
the server VM.
+=== Use serial collector check
+
+There are various garbage collectors for the OpenJDK-derived JVMs targeting
+different workloads. The serial collector in particular is best suited for
+single logical CPU machines or extremely small heaps, neither of which is
+suitable for running Elasticsearch. Using the serial collector with
+Elasticsearch can be devastating for performance. The serial collector check
+ensures that Elasticsearch is not configured to run with the serial
+collector. To pass the serial collector check, you must not start Elasticsearch
+with the serial collector (whether it's from the defaults for the JVM that
+you're using, or you've explicitly specified it with `-XX:+UseSerialGC`). Note
+that the default JVM configuration that ship with Elasticsearch configures
+Elasticsearch to use the CMS collector.
+
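As a sketch, the default distribution selects the CMS collector explicitly in
`jvm.options` (assuming you manage GC flags there):

[source,txt]
------------------
## GC configuration
-XX:+UseConcMarkSweepGC
------------------
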
=== OnError and OnOutOfMemoryError checks
The JVM options `OnError` and `OnOutOfMemoryError` enable executing
diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc
index 68f73fc96b..8db4f6bae5 100644
--- a/docs/reference/setup/configuration.asciidoc
+++ b/docs/reference/setup/configuration.asciidoc
@@ -15,7 +15,7 @@ able to join a cluster, such as `cluster.name` and `network.host`.
Elasticsearch has two configuration files:
* `elasticsearch.yml` for configuring Elasticsearch, and
-* `logging.yml` for configuring Elasticsearch logging.
+* `log4j2.properties` for configuring Elasticsearch logging.
These files are located in the config directory, whose location defaults to
`$ES_HOME/config/`. The Debian and RPM packages set the config directory
@@ -110,24 +110,50 @@ command line with `es.node.name` or in the config file with `node.name`.
[[logging]]
== Logging configuration
-Elasticsearch uses an internal logging abstraction and comes, out of the
-box, with http://logging.apache.org/log4j/1.2/[log4j]. It tries to simplify
-log4j configuration by using http://www.yaml.org/[YAML] to configure it,
-and the logging configuration file is `config/logging.yml`. The
-http://en.wikipedia.org/wiki/JSON[JSON] and
-http://en.wikipedia.org/wiki/.properties[properties] formats are also
-supported. Multiple configuration files can be loaded, in which case they will
-get merged, as long as they start with the `logging.` prefix and end with one
-of the supported suffixes (either `.yml`, `.yaml`, `.json` or `.properties`).
-The logger section contains the java packages and their corresponding log
-level, where it is possible to omit the `org.elasticsearch` prefix. The
-appender section contains the destinations for the logs. Extensive information
-on how to customize logging and all the supported appenders can be found on
-the http://logging.apache.org/log4j/1.2/manual.html[log4j documentation].
-
-Additional Appenders and other logging classes provided by
-http://logging.apache.org/log4j/extras/[log4j-extras] are also available,
-out of the box.
+Elasticsearch uses http://logging.apache.org/log4j/2.x/[Log4j 2] for
+logging. Log4j 2 can be configured using the `log4j2.properties`
+file. Elasticsearch exposes a single property `${sys:es.logs}` that can be
+referenced in the configuration file to determine the location of the log files;
+this will resolve to a prefix for the Elasticsearch log file at runtime.
+
+For example, if your log directory (`path.logs`) is `/var/log/elasticsearch` and
+your cluster is named `production` then `${sys:es.logs}` will resolve to
+`/var/log/elasticsearch/production`.
+
+[source,properties]
+--------------------------------------------------
+appender.rolling.type = RollingFile <1>
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs}.log <2>
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
+appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log <3>
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4>
+appender.rolling.policies.time.interval = 1 <5>
+appender.rolling.policies.time.modulate = true <6>
+--------------------------------------------------
+
+<1> Configure the `RollingFile` appender
+<2> Log to `/var/log/elasticsearch/production.log`
+<3> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd.log`
+<4> Using a time-based roll policy
+<5> Roll logs on a daily basis
+<6> Align rolls on the day boundary (as opposed to rolling every twenty-four
+ hours)
+
+If you append `.gz` or `.zip` to `appender.rolling.filePattern`, then the logs
+will be compressed as they are rolled.
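
For example, building on the appender above, rolled logs would be
gzip-compressed with:

[source,properties]
--------------------------------------------------
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log.gz
--------------------------------------------------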
+
+Multiple configuration files can be loaded (in which case they will get merged)
+as long as they are named `log4j2.properties` and have the Elasticsearch config
+directory as an ancestor; this is useful for plugins that expose additional
+loggers. The logger section contains the java packages and their corresponding
+log level. The appender section contains the destinations for the logs.
+Extensive information on how to customize logging and all the supported
+appenders can be found on the
+http://logging.apache.org/log4j/2.x/manual/configuration.html[Log4j
+documentation].
[float]
[[deprecation-logging]]
@@ -136,14 +162,21 @@ out of the box.
In addition to regular logging, Elasticsearch allows you to enable logging
of deprecated actions. For example, this allows you to determine early if
you need to migrate certain functionality in the future. By default,
-deprecation logging is disabled. You can enable it in the `config/logging.yml`
-file by setting the deprecation log level to `DEBUG`.
+deprecation logging is enabled at the WARN level, the level at which all
+deprecation log messages will be emitted.
-[source,yaml]
+[source,properties]
--------------------------------------------------
-deprecation: DEBUG, deprecation_log_file
+logger.deprecation.level = warn
--------------------------------------------------
This will create a daily rolling deprecation log file in your log directory.
Check this file regularly, especially when you intend to upgrade to a new
major version.
+
+The default logging configuration has set the roll policy for the deprecation
+logs to roll and compress after 1 GB, and to preserve a maximum of five log
+files (four rolled logs, and the active log).
+
+You can disable it in the `config/log4j2.properties` file by setting the deprecation
+log level to `info`.
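
For example:

[source,properties]
--------------------------------------------------
logger.deprecation.level = info
--------------------------------------------------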
diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc
index db4783c3df..a3ba7fc912 100644
--- a/docs/reference/setup/install/check-running.asciidoc
+++ b/docs/reference/setup/install/check-running.asciidoc
@@ -3,26 +3,34 @@
You can test that your Elasticsearch node is running by sending an HTTP
request to port `9200` on `localhost`:
-[source,sh]
+[source,js]
--------------------------------------------
-curl localhost:9200
+GET /
--------------------------------------------
+// CONSOLE
which should give you a response something like this:
-[source,js]
+["source","js",subs="attributes,callouts"]
--------------------------------------------
{
- "name" : "Harry Leland",
+ "name" : "Cp8oag6",
"cluster_name" : "elasticsearch",
+ "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",
"version" : {
- "number" : "5.0.0-alpha1",
+ "number" : "{version}",
"build_hash" : "f27399d",
"build_date" : "2016-03-30T09:51:41.449Z",
"build_snapshot" : false,
- "lucene_version" : "6.0.0"
+ "lucene_version" : "{lucene_version}"
},
"tagline" : "You Know, for Search"
}
--------------------------------------------
-
+// TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/]
+// TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/]
+// TESTRESPONSE[s/"cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",/"cluster_uuid" : "$body.cluster_uuid",/]
+// TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/]
+// TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/]
+// TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/]
+// So much s/// but at least we test that the layout is close to matching....
diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc
index a9bf417079..578dcb5db4 100644
--- a/docs/reference/setup/install/deb.asciidoc
+++ b/docs/reference/setup/install/deb.asciidoc
@@ -16,7 +16,7 @@ include::key.asciidoc[]
[source,sh]
-------------------------
-wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
+wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
-------------------------
[[deb-repo]]
@@ -29,11 +29,11 @@ You may need to install the `apt-transport-https` package on Debian before proce
sudo apt-get install apt-transport-https
--------------------------------------------------
-Save the repository definition to +/etc/apt/sources.list.d/elasticsearch-{major-version}.list+:
+Save the repository definition to +/etc/apt/sources.list.d/elastic-{major-version}.list+:
["source","sh",subs="attributes,callouts"]
--------------------------------------------------
-echo "deb https://packages.elastic.co/elasticsearch/{major-version}/debian stable main" | sudo tee -a /etc/apt/sources.list.d/elasticsearch-{major-version}.list
+echo "deb https://artifacts.elastic.co/packages/{major-version}-prerelease/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-{major-version}.list
--------------------------------------------------
[WARNING]
@@ -63,7 +63,7 @@ If two entries exist for the same Elasticsearch repository, you will see an erro
["literal",subs="attributes,callouts"]
-Duplicate sources.list entry https://packages.elastic.co/elasticsearch/{major-version}/debian/ ...`
+Duplicate sources.list entry https://artifacts.elastic.co/packages/{major-version}/apt/ ...`
Examine +/etc/apt/sources.list.d/elasticsearch-{major-version}.list+ for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file.
==================================================
@@ -76,12 +76,12 @@ The Debian package for Elasticsearch v{version} can be downloaded from the websit
["source","sh",subs="attributes"]
--------------------------------------------
-wget https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/deb/elasticsearch/{version}/elasticsearch-{version}.deb
+wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.deb
sha1sum elasticsearch-{version}.deb <1>
sudo dpkg -i elasticsearch-{version}.deb
--------------------------------------------
<1> Compare the SHA produced by `sha1sum` or `shasum` with the
- https://download.elastics.co/elasticsearch/release/org/elasticsearch/distribution/deb/elasticsearch/{version}/elasticsearch-{version}.deb.sha1[published SHA].
+ https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.deb.sha1[published SHA].
include::init-systemd.asciidoc[]
diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc
index 08dfc253f0..785c0897c3 100644
--- a/docs/reference/setup/install/rpm.asciidoc
+++ b/docs/reference/setup/install/rpm.asciidoc
@@ -20,7 +20,7 @@ include::key.asciidoc[]
[source,sh]
-------------------------
-rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
+rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
-------------------------
[[rpm-repo]]
@@ -34,9 +34,9 @@ OpenSuSE based distributions, containing:
--------------------------------------------------
[elasticsearch-{major-version}]
name=Elasticsearch repository for {major-version} packages
-baseurl=https://packages.elastic.co/elasticsearch/{major-version}/centos
+baseurl=https://artifacts.elastic.co/packages/{major-version}-prerelease/yum
gpgcheck=1
-gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
+gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
@@ -61,12 +61,12 @@ The RPM for Elasticsearch v{version} can be downloaded from the website and insta
["source","sh",subs="attributes"]
--------------------------------------------
-wget https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/rpm/elasticsearch/{version}/elasticsearch-{version}.rpm
+wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.rpm
sha1sum elasticsearch-{version}.rpm <1>
sudo rpm --install elasticsearch-{version}.rpm
--------------------------------------------
<1> Compare the SHA produced by `sha1sum` or `shasum` with the
- https://download.elastics.co/elasticsearch/release/org/elasticsearch/distribution/rpm/elasticsearch/{version}/elasticsearch-{version}.rpm.sha1[published SHA].
+ https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.rpm.sha1[published SHA].
include::init-systemd.asciidoc[]
diff --git a/docs/reference/setup/install/sysconfig-file.asciidoc b/docs/reference/setup/install/sysconfig-file.asciidoc
index da3f091096..8cf3482bf3 100644
--- a/docs/reference/setup/install/sysconfig-file.asciidoc
+++ b/docs/reference/setup/install/sysconfig-file.asciidoc
@@ -40,7 +40,7 @@
`CONF_DIR`::
Configuration file directory (which needs to include `elasticsearch.yml`
- and `logging.yml` files), defaults to `/etc/elasticsearch`.
+ and `log4j2.properties` files), defaults to `/etc/elasticsearch`.
`ES_JAVA_OPTS`::
diff --git a/docs/reference/setup/install/systemd.asciidoc b/docs/reference/setup/install/systemd.asciidoc
index 035932a83f..bf94e95fb6 100644
--- a/docs/reference/setup/install/systemd.asciidoc
+++ b/docs/reference/setup/install/systemd.asciidoc
@@ -18,13 +18,36 @@ sudo systemctl stop elasticsearch.service
--------------------------------------------
These commands provide no feedback as to whether Elasticsearch was started
-successfully or not. Instead, this information will be written to the
-`systemd` journal, which can be tailed as follows:
+successfully or not. Instead, this information will be written to the log
+files located in `/var/log/elasticsearch/`.
+
+By default, the Elasticsearch service doesn't log information to the `systemd`
+journal. To enable `journalctl` logging, the `--quiet` option must be removed
+from the `ExecStart` command line in the `elasticsearch.service` file.
+
+When `systemd` logging is enabled, the logging information is available using
+the `journalctl` commands:
+
+To tail the journal:
[source,sh]
--------------------------------------------
sudo journalctl -f
--------------------------------------------
-Log files can be found in `/var/log/elasticsearch/`.
+To list journal entries for the elasticsearch service:
+
+[source,sh]
+--------------------------------------------
+sudo journalctl --unit elasticsearch
+--------------------------------------------
+
+To list journal entries for the elasticsearch service starting from a given time:
+
+[source,sh]
+--------------------------------------------
+sudo journalctl --unit elasticsearch --since "2016-10-30 18:17:16"
+--------------------------------------------
+Check `man journalctl` or https://www.freedesktop.org/software/systemd/man/journalctl.html for
+more command line options.
diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc
index e66c176470..22e3c3b8d4 100644
--- a/docs/reference/setup/install/windows.asciidoc
+++ b/docs/reference/setup/install/windows.asciidoc
@@ -2,7 +2,7 @@
=== Install Elasticsearch on Windows
Elasticsearch can be installed on Windows using the `.zip` package. This
-comes with a `service.bat` command which will setup Elasticsearch to run as a
+comes with an `elasticsearch-service.bat` command which will set up Elasticsearch to run as a
service.
The latest stable version of Elasticsearch can be found on the
@@ -13,7 +13,7 @@ link:/downloads/past-releases[Past Releases page].
[[install-windows]]
==== Download and install the `.zip` package
-Download the `.zip` archive for Elastisearch v{version} from: https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/{version}/elasticsearch-{version}.zip
+Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip
Unzip it with your favourite unzip tool. This will create a folder called
+elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal
@@ -65,7 +65,7 @@ include::check-running.asciidoc[]
Elasticsearch can be installed as a service to run in the background or start
automatically at boot time without any user interaction. This can be achieved
-through the `service.bat` script in the `bin\` folder which allows one to
+through the `elasticsearch-service.bat` script in the `bin\` folder which allows one to
install, remove, manage or configure the service and potentially start and
stop the service, all from the command-line.
@@ -73,7 +73,7 @@ stop the service, all from the command-line.
--------------------------------------------------
c:\elasticsearch-{version}{backslash}bin>service
-Usage: service.bat install|remove|start|stop|manager [SERVICE_ID]
+Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID]
--------------------------------------------------
The script requires one parameter (the command to execute) followed by an
@@ -156,7 +156,7 @@ The Elasticsearch service can be configured prior to installation by setting the
`CONF_DIR`::
Configuration file directory (which needs to include `elasticsearch.yml`
- and `logging.yml` files), defaults to `%ES_HOME%\conf`.
+ and `log4j2.properties` files), defaults to `%ES_HOME%\conf`.
`ES_JAVA_OPTS`::
@@ -170,18 +170,18 @@ The Elasticsearch service can be configured prior to installation by setting the
The timeout in seconds that procrun waits for service to exit gracefully. Defaults to `0`.
-NOTE: At its core, `service.bat` relies on http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project
+NOTE: At its core, `elasticsearch-service.bat` relies on http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project
to install the service. Environment variables set prior to the service installation are copied and will be used during the service lifecycle. This means any changes made to them after the installation will not be picked up unless the service is reinstalled.
NOTE: On Windows, the <<heap-size,heap size>> can be configured as for
any other Elasticsearch installation when running Elasticsearch from the
command line, or when installing Elasticsearch as a service for the
first time. To adjust the heap size for an already installed service,
-use the service manager: `bin\service.bat manager`.
+use the service manager: `bin\elasticsearch-service.bat manager`.
Using the Manager GUI::
-It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `service.bat manager` from the command-line will open up the manager window:
+It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `elasticsearch-service.bat manager` from the command-line will open up the manager window:
image::images/service-manager-win.png["Windows Service Manager GUI",align="center"]
diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc
index 5f85447bdc..efbfc50f7a 100644
--- a/docs/reference/setup/install/zip-targz.asciidoc
+++ b/docs/reference/setup/install/zip-targz.asciidoc
@@ -17,13 +17,13 @@ The `.zip` archive for Elasticsearch v{version} can be downloaded and installed a
["source","sh",subs="attributes"]
--------------------------------------------
-wget https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/{version}/elasticsearch-{version}.zip
+wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip
sha1sum elasticsearch-{version}.zip <1>
unzip elasticsearch-{version}.zip
cd elasticsearch-{version}/ <2>
--------------------------------------------
<1> Compare the SHA produced by `sha1sum` or `shasum` with the
- https://download.elastics.co/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/{version}/elasticsearch-{version}.zip.sha1[published SHA].
+ https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip.sha1[published SHA].
<2> This directory is known as `$ES_HOME`.
[[install-targz]]
@@ -33,13 +33,13 @@ The `.tar.gz` archive for Elasticsearch v{version} can be downloaded and installe
["source","sh",subs="attributes"]
--------------------------------------------
-wget https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/{version}/elasticsearch-{version}.tar.gz
+wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz
sha1sum elasticsearch-{version}.tar.gz <1>
tar -xzf elasticsearch-{version}.tar.gz
cd elasticsearch-{version}/ <2>
--------------------------------------------
<1> Compare the SHA produced by `sha1sum` or `shasum` with the
- https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/{version}/elasticsearch-{version}.tar.gz.sha1[published SHA].
+ https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz.sha1[published SHA].
<2> This directory is known as `$ES_HOME`.
[[zip-targz-running]]
@@ -52,11 +52,14 @@ Elasticsearch can be started from the command line as follows:
./bin/elasticsearch
--------------------------------------------
-By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT`,
-and can be stopped by pressing `Ctrl-C`.
+By default, Elasticsearch runs in the foreground, prints its logs to the
+standard output (`stdout`), and can be stopped by pressing `Ctrl-C`.
include::check-running.asciidoc[]
+Log printing to `stdout` can be disabled using the `-q` or `--quiet`
+option on the command line.
+
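For example, to run in the foreground without printing logs to `stdout`:

[source,sh]
--------------------------------------------
./bin/elasticsearch -q
--------------------------------------------
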
[[setup-installation-daemon]]
==== Running as a daemon
diff --git a/docs/reference/setup/stopping.asciidoc b/docs/reference/setup/stopping.asciidoc
index 45a3b122a9..4f632ec06f 100644
--- a/docs/reference/setup/stopping.asciidoc
+++ b/docs/reference/setup/stopping.asciidoc
@@ -23,7 +23,7 @@ From the Elasticsearch startup logs:
[source,sh]
--------------------------------------------------
-[2016-07-07 12:26:18,908][INFO ][node ] [Reaper] version[5.0.0-alpha4], pid[15399], build[3f5b994/2016-06-27T16:23:46.861Z], OS[Mac OS X/10.11.5/x86_64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_92/25.92-b14]
+[2016-07-07 12:26:18,908][INFO ][node ] [I8hydUG] version[5.0.0-alpha4], pid[15399], build[3f5b994/2016-06-27T16:23:46.861Z], OS[Mac OS X/10.11.5/x86_64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_92/25.92-b14]
--------------------------------------------------
Or by specifying a location to write a PID file to on startup (`-p <path>`):
diff --git a/docs/reference/setup/sysconfig/file-descriptors.asciidoc b/docs/reference/setup/sysconfig/file-descriptors.asciidoc
index 73a8dad616..25a2214146 100644
--- a/docs/reference/setup/sysconfig/file-descriptors.asciidoc
+++ b/docs/reference/setup/sysconfig/file-descriptors.asciidoc
@@ -18,5 +18,6 @@ using the <<cluster-nodes-stats>> API, with:
[source,js]
--------------------------------------------------
-curl 'localhost:9200/_nodes/stats/process?pretty&filter_path=**.max_file_descriptors'
+GET _nodes/stats/process?filter_path=**.max_file_descriptors
--------------------------------------------------
+// CONSOLE
diff --git a/docs/reference/setup/sysconfig/heap_size.asciidoc b/docs/reference/setup/sysconfig/heap_size.asciidoc
index 00c4553b97..f54ca7813a 100644
--- a/docs/reference/setup/sysconfig/heap_size.asciidoc
+++ b/docs/reference/setup/sysconfig/heap_size.asciidoc
@@ -1,8 +1,8 @@
[[heap-size]]
=== Set JVM heap size via jvm.options
-In development mode, Elasticsearch tells the JVM to use a heap with a minimum
-size of 256 MB and a maximum size of 1 GB. When moving to production, it is
+By default, Elasticsearch tells the JVM to use a heap with a minimum
+and maximum size of 2 GB. When moving to production, it is
important to configure heap size to ensure that Elasticsearch has enough
heap available.
@@ -48,8 +48,8 @@ Here are examples of how to set the heap size via the jvm.options file:
[source,txt]
------------------
-Xms2g <1>
-Xmx2g <2>
+-Xms2g <1>
+-Xmx2g <2>
------------------
<1> Set the minimum heap size to 2g.
<2> Set the maximum heap size to 2g.
diff --git a/docs/reference/setup/sysconfig/swap.asciidoc b/docs/reference/setup/sysconfig/swap.asciidoc
index b84db6f415..08752e6fd9 100644
--- a/docs/reference/setup/sysconfig/swap.asciidoc
+++ b/docs/reference/setup/sysconfig/swap.asciidoc
@@ -35,8 +35,9 @@ request:
[source,sh]
--------------
-curl 'http://localhost:9200/_nodes?pretty&filter_path=**.mlockall'
+GET _nodes?filter_path=**.mlockall
--------------
+// CONSOLE
If you see that `mlockall` is `false`, then it means that the `mlockall`
request has failed. You will also see a line with more information in the
@@ -63,14 +64,7 @@ Systems using `systemd`::
Another possible reason why `mlockall` can fail is that the temporary directory
(usually `/tmp`) is mounted with the `noexec` option. This can be solved by
-specifying a new temp directory, by starting Elasticsearch with:
-
-[source,sh]
---------------
-./bin/elasticsearch -Djava.io.tmpdir=/path/to/temp/dir
---------------
-
-or using the `ES_JAVA_OPTS` environment variable:
+specifying a new temp directory using the `ES_JAVA_OPTS` environment variable:
[source,sh]
--------------
@@ -78,6 +72,8 @@ export ES_JAVA_OPTS="$ES_JAVA_OPTS -Djava.io.tmpdir=/path/to/temp/dir"
./bin/elasticsearch
--------------
+or by setting this JVM flag in the `jvm.options` configuration file.
+
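The equivalent `jvm.options` entry is a single line (the path is a
placeholder):

[source,txt]
------------------
-Djava.io.tmpdir=/path/to/temp/dir
------------------
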
[[disable-swap-files]]
==== Disable all swap files
@@ -100,4 +96,3 @@ The second option available on Linux systems is to ensure that the sysctl value
`vm.swappiness` is set to `1`. This reduces the kernel's tendency to swap and
should not lead to swapping under normal circumstances, while still allowing
the whole system to swap in emergency conditions.
-
diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc
index fe7daf8cca..0bf99b2faf 100644
--- a/docs/reference/testing/testing-framework.asciidoc
+++ b/docs/reference/testing/testing-framework.asciidoc
@@ -20,7 +20,7 @@ All of the tests are run using a custom junit runner, the `RandomizedRunner` pro
First, you need to include the testing dependency in your project, along with the elasticsearch dependency you have already added. If you use maven and its `pom.xml` file, it looks like this
-[[source,xml]]
+[source,xml]
--------------------------------------------------
<dependencies>
<dependency>
@@ -258,5 +258,3 @@ assertHitCount(searchResponse, 4);
assertFirstHit(searchResponse, hasId("4"));
assertSearchHits(searchResponse, "1", "2", "3", "4");
----------------------------
-
-
diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc
index 258b880f88..47ca68e00f 100644
--- a/docs/resiliency/index.asciidoc
+++ b/docs/resiliency/index.asciidoc
@@ -64,6 +64,22 @@ framework. As the Jepsen tests evolve, we will continue porting new scenarios th
all new scenarios and will report issues that we find on this page and in our GitHub repository.
[float]
+=== Repeated network partitions can cause cluster state updates to be lost (STATUS: ONGOING)
+
+During a networking partition, cluster state updates (like mapping changes or shard assignments)
+are committed if a majority of the master-eligible nodes received the update correctly. This means that the current master has access
+to enough nodes in the cluster to continue to operate correctly. When the network partition heals, the isolated nodes catch
+up with the current state and receive the previously missed changes. However, if a second partition happens while the cluster
+is still recovering from the previous one *and* the old master falls on the minority side, it may be that a new master is elected
+which has not yet caught up. If that happens, cluster state updates can be lost.
+
+This problem is mostly fixed by {GIT}20384[#20384] (v5.0.0), which takes committed cluster state updates into account during master
+election. This considerably reduces the chance of this rare problem occurring but does not fully mitigate it. If the second partition
+happens concurrently with a cluster state update and blocks the cluster state commit message from reaching a majority of nodes, it may be
+that the in-flight update will be lost. If the now-isolated master can still acknowledge the cluster state update to the client, this
+will amount to the loss of an acknowledged change. Fixing that last scenario needs considerable work and is currently targeted at v6.0.0.
+
+[float]
=== Better request retry mechanism when nodes are disconnected (STATUS: ONGOING)
If the node holding a primary shard is disconnected for whatever reason, the
@@ -96,7 +112,7 @@ exceptions, but it is still possible to cause a node to run out of heap
space. The following issues have been identified:
* Set a hard limit on `from`/`size` parameters {GIT}9311[#9311]. (STATUS: DONE, v2.1.0)
-* Prevent combinatorial explosion in aggregations from causing OOM {GIT}8081[#8081]. (STATUS: ONGOING)
+* Prevent combinatorial explosion in aggregations from causing OOM {GIT}8081[#8081]. (STATUS: DONE, v5.0.0)
* Add the byte size of each hit to the request circuit breaker {GIT}9310[#9310]. (STATUS: ONGOING)
* Limit the size of individual requests and also add a circuit breaker for the total memory used by in-flight request objects {GIT}16011[#16011]. (STATUS: DONE, v5.0.0)
diff --git a/modules/build.gradle b/modules/build.gradle
index 11131c28e2..d5b207625c 100644
--- a/modules/build.gradle
+++ b/modules/build.gradle
@@ -35,8 +35,4 @@ subprojects {
if (project.file('src/main/config').exists()) {
throw new InvalidModelException("Modules cannot contain config files")
}
-
- // these are implementation details of our build, no need to publish them!
- install.enabled = false
- uploadArchives.enabled = false
}
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java
index 079ff73846..9e157dc994 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java
@@ -32,22 +32,40 @@ import java.util.Map;
*/
abstract class AbstractStringProcessor extends AbstractProcessor {
private final String field;
+ private final boolean ignoreMissing;
- protected AbstractStringProcessor(String tag, String field) {
+ protected AbstractStringProcessor(String tag, String field, boolean ignoreMissing) {
super(tag);
this.field = field;
+ this.ignoreMissing = ignoreMissing;
}
public String getField() {
return field;
}
+ boolean isIgnoreMissing() {
+ return ignoreMissing;
+ }
+
@Override
public final void execute(IngestDocument document) {
- String val = document.getFieldValue(field, String.class);
- if (val == null) {
+ String val;
+
+ try {
+ val = document.getFieldValue(field, String.class);
+ } catch (IllegalArgumentException e) {
+ if (ignoreMissing && document.hasField(field) == false) {
+ return;
+ }
+ throw e;
+ }
+ if (val == null && ignoreMissing) {
+ return;
+ } else if (val == null) {
throw new IllegalArgumentException("field [" + field + "] is null, cannot process it.");
}
+
document.setFieldValue(field, process(val));
}
@@ -64,9 +82,10 @@ abstract class AbstractStringProcessor extends AbstractProcessor {
public AbstractStringProcessor create(Map<String, Processor.Factory> registry, String tag,
Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(processorType, tag, config, "field");
- return newProcessor(tag, field);
+ boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(processorType, tag, config, "ignore_missing", false);
+ return newProcessor(tag, field, ignoreMissing);
}
- protected abstract AbstractStringProcessor newProcessor(String processorTag, String field);
+ protected abstract AbstractStringProcessor newProcessor(String processorTag, String field, boolean ignoreMissing);
}
}
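
For illustration, a minimal sketch (plain Java, not part of this patch) of the ignore_missing contract that AbstractStringProcessor and its subclasses now share: a missing or null field is silently skipped when ignore_missing is true, and rejected with an exception otherwise.

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

class IgnoreMissingSketch {
    // Mimics AbstractStringProcessor#execute with the document reduced to a plain map.
    static void process(Map<String, Object> doc, String field, boolean ignoreMissing) {
        Object val = doc.get(field);
        if (val == null) {
            if (ignoreMissing) {
                return; // leave the document untouched
            }
            throw new IllegalArgumentException("field [" + field + "] is null, cannot process it.");
        }
        doc.put(field, ((String) val).toLowerCase(Locale.ROOT)); // e.g. what LowercaseProcessor would do
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        process(doc, "message", true); // no-op instead of an exception
        System.out.println(doc);       // prints {}
    }
}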
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java
index 85cb8acbc0..7bc6a17152 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java
@@ -80,7 +80,7 @@ public final class AppendProcessor extends AbstractProcessor {
Object value = ConfigurationUtils.readObject(TYPE, processorTag, config, "value");
TemplateService.Template compiledTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag,
"field", field, templateService);
- return new AppendProcessor(processorTag, templateService.compile(field), ValueSource.wrap(value, templateService));
+ return new AppendProcessor(processorTag, compiledTemplate, ValueSource.wrap(value, templateService));
}
}
}
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java
index c85e8d17a0..c57d93fa9c 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java
@@ -114,12 +114,14 @@ public final class ConvertProcessor extends AbstractProcessor {
private final String field;
private final String targetField;
private final Type convertType;
+ private final boolean ignoreMissing;
- ConvertProcessor(String tag, String field, String targetField, Type convertType) {
+ ConvertProcessor(String tag, String field, String targetField, Type convertType, boolean ignoreMissing) {
super(tag);
this.field = field;
this.targetField = targetField;
this.convertType = convertType;
+ this.ignoreMissing = ignoreMissing;
}
String getField() {
@@ -134,11 +136,27 @@ public final class ConvertProcessor extends AbstractProcessor {
return convertType;
}
+ boolean isIgnoreMissing() {
+ return ignoreMissing;
+ }
+
@Override
public void execute(IngestDocument document) {
- Object oldValue = document.getFieldValue(field, Object.class);
+ Object oldValue = null;
Object newValue;
- if (oldValue == null) {
+
+ try {
+ oldValue = document.getFieldValue(field, Object.class);
+ } catch (IllegalArgumentException e) {
+ if (ignoreMissing) {
+ return;
+ }
+ throw e;
+ }
+
+ if (oldValue == null && ignoreMissing) {
+ return;
+ } else if (oldValue == null) {
throw new IllegalArgumentException("Field [" + field + "] is null, cannot be converted to type [" + convertType + "]");
}
@@ -168,7 +186,8 @@ public final class ConvertProcessor extends AbstractProcessor {
String typeProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type");
String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", field);
Type convertType = Type.fromString(processorTag, "type", typeProperty);
- return new ConvertProcessor(processorTag, field, targetField, convertType);
+ boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false);
+ return new ConvertProcessor(processorTag, field, targetField, convertType, ignoreMissing);
}
}
}
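
As context for the conversion tests further below, a sketch of the best-effort conversion the existing `auto` type performs (behavior inferred from ConvertProcessorTests; not the upstream implementation): boolean strings first, then integers, then floats, with unmatched strings returned unchanged.

class AutoConvertSketch {
    static Object autoConvert(String value) {
        if ("true".equalsIgnoreCase(value) || "false".equalsIgnoreCase(value)) {
            return Boolean.parseBoolean(value);
        }
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException ignored) {
            // not an integer, fall through
        }
        try {
            return Float.parseFloat(value);
        } catch (NumberFormatException ignored) {
            // not a float either
        }
        return value; // strings that match nothing are left as-is
    }

    public static void main(String[] args) {
        System.out.println(autoConvert("true"));                // Boolean true
        System.out.println(autoConvert("42"));                  // Integer 42
        System.out.println(autoConvert("4.2"));                 // Float 4.2
        System.out.println(autoConvert("notAnIntFloatOrBool")); // unchanged String
    }
}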
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java
new file mode 100644
index 0000000000..bfc3231173
--- /dev/null
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import org.elasticsearch.ingest.AbstractProcessor;
+import org.elasticsearch.ingest.ConfigurationUtils;
+import org.elasticsearch.ingest.IngestDocument;
+import org.elasticsearch.ingest.Processor;
+
+import java.util.Map;
+
+public final class DotExpanderProcessor extends AbstractProcessor {
+
+ static final String TYPE = "dot_expander";
+
+ private final String path;
+ private final String field;
+
+ DotExpanderProcessor(String tag, String path, String field) {
+ super(tag);
+ this.path = path;
+ this.field = field;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void execute(IngestDocument ingestDocument) throws Exception {
+ String path;
+ Map<String, Object> map;
+ if (this.path != null) {
+ path = this.path + "." + field;
+ map = ingestDocument.getFieldValue(this.path, Map.class);
+ } else {
+ path = field;
+ map = ingestDocument.getSourceAndMetadata();
+ }
+
+ if (ingestDocument.hasField(path)) {
+ Object value = map.remove(field);
+ ingestDocument.appendFieldValue(path, value);
+ } else {
+ // check whether we actually can expand the field in question into an object field.
+ // part of the path may already exist and if part of it would be a value field (string, integer etc.)
+ // then we can't override it with an object field and we should fail with a good reason.
+ // IngestDocument#setFieldValue(...) would fail too, but the error isn't very understandable
+ for (int index = path.indexOf('.'); index != -1; index = path.indexOf('.', index + 1)) {
+ String partialPath = path.substring(0, index);
+ if (ingestDocument.hasField(partialPath)) {
+ Object val = ingestDocument.getFieldValue(partialPath, Object.class);
+ if ((val instanceof Map) == false) {
+ throw new IllegalArgumentException("cannot expend [" + path + "], because [" + partialPath +
+ "] is not an object field, but a value field");
+ }
+ } else {
+ break;
+ }
+ }
+ Object value = map.remove(field);
+ ingestDocument.setFieldValue(path, value);
+ }
+ }
+
+ @Override
+ public String getType() {
+ return TYPE;
+ }
+
+ String getPath() {
+ return path;
+ }
+
+ String getField() {
+ return field;
+ }
+
+ public static final class Factory implements Processor.Factory {
+
+ @Override
+ public Processor create(Map<String, Processor.Factory> processorFactories, String tag,
+ Map<String, Object> config) throws Exception {
+ String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field");
+ if (field.contains(".") == false) {
+ throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field",
+ "field does not contain a dot");
+ }
+ if (field.indexOf('.') == 0 || field.lastIndexOf('.') == field.length() - 1) {
+ throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field",
+ "Field can't start or end with a dot");
+ }
+ int firstIndex = -1;
+ for (int index = field.indexOf('.'); index != -1; index = field.indexOf('.', index + 1)) {
+ if (index - firstIndex == 1) {
+ throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field",
+ "No space between dots");
+ }
+ firstIndex = index;
+ }
+
+ String path = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "path");
+ return new DotExpanderProcessor(tag, path, field);
+ }
+ }
+}
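
To make the new processor's effect concrete, a standalone sketch (plain Java, illustrative only) of the expansion it performs on a single dotted key: {"foo.bar": "baz"} becomes {"foo": {"bar": "baz"}}. Unlike the real processor, this sketch blindly creates intermediate objects instead of rejecting documents where a prefix is already a value field, and it does not handle the append-to-existing-field case.

import java.util.HashMap;
import java.util.Map;

class DotExpandSketch {
    @SuppressWarnings("unchecked")
    static void expand(Map<String, Object> source, String field) {
        Object value = source.remove(field);
        String[] parts = field.split("\\.");
        Map<String, Object> current = source;
        for (int i = 0; i < parts.length - 1; i++) {
            // descend, creating intermediate object fields as needed
            current = (Map<String, Object>) current.computeIfAbsent(parts[i], k -> new HashMap<String, Object>());
        }
        current.put(parts[parts.length - 1], value);
    }

    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("foo.bar", "baz");
        expand(source, "foo.bar");
        System.out.println(source); // {foo={bar=baz}}
    }
}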
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java
index e5a720011a..2a1046acb9 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java
@@ -38,6 +38,8 @@ import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty;
*
* This can be useful in cases to do string operations on json array of strings,
* or remove a field from objects inside a json array.
+ *
+ * Note that this processor is experimental.
*/
public final class ForEachProcessor extends AbstractProcessor {
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java
index 44528bdac8..4a4432a9bb 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java
@@ -39,21 +39,36 @@ public final class GrokProcessor extends AbstractProcessor {
private final String matchField;
private final Grok grok;
private final boolean traceMatch;
+ private final boolean ignoreMissing;
- public GrokProcessor(String tag, Map<String, String> patternBank, List<String> matchPatterns, String matchField) {
- this(tag, patternBank, matchPatterns, matchField, false);
- }
-
- public GrokProcessor(String tag, Map<String, String> patternBank, List<String> matchPatterns, String matchField, boolean traceMatch) {
+ public GrokProcessor(String tag, Map<String, String> patternBank, List<String> matchPatterns, String matchField,
+ boolean traceMatch, boolean ignoreMissing) {
super(tag);
this.matchField = matchField;
this.grok = new Grok(patternBank, combinePatterns(matchPatterns, traceMatch));
this.traceMatch = traceMatch;
+ this.ignoreMissing = ignoreMissing;
}
@Override
public void execute(IngestDocument ingestDocument) throws Exception {
- String fieldValue = ingestDocument.getFieldValue(matchField, String.class);
+ String fieldValue;
+
+ try {
+ fieldValue = ingestDocument.getFieldValue(matchField, String.class);
+ } catch (IllegalArgumentException e) {
+ if (ignoreMissing && ingestDocument.hasField(matchField) == false) {
+ return;
+ }
+ throw e;
+ }
+
+ if (fieldValue == null && ignoreMissing) {
+ return;
+ } else if (fieldValue == null) {
+ throw new IllegalArgumentException("field [" + matchField + "] is null, cannot process it.");
+ }
+
Map<String, Object> matches = grok.captures(fieldValue);
if (matches == null) {
throw new IllegalArgumentException("Provided Grok expressions do not match field value: [" + fieldValue + "]");
@@ -77,10 +92,14 @@ public final class GrokProcessor extends AbstractProcessor {
return TYPE;
}
- public Grok getGrok() {
+ Grok getGrok() {
return grok;
}
+ boolean isIgnoreMissing() {
+ return ignoreMissing;
+ }
+
String getMatchField() {
return matchField;
}
@@ -128,6 +147,7 @@ public final class GrokProcessor extends AbstractProcessor {
String matchField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
List<String> matchPatterns = ConfigurationUtils.readList(TYPE, processorTag, config, "patterns");
boolean traceMatch = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "trace_match", false);
+ boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false);
if (matchPatterns.isEmpty()) {
throw newConfigurationException(TYPE, processorTag, "patterns", "List of patterns must not be empty");
@@ -139,7 +159,7 @@ public final class GrokProcessor extends AbstractProcessor {
}
try {
- return new GrokProcessor(processorTag, patternBank, matchPatterns, matchField, traceMatch);
+ return new GrokProcessor(processorTag, patternBank, matchPatterns, matchField, traceMatch, ignoreMissing);
} catch (Exception e) {
throw newConfigurationException(TYPE, processorTag, "patterns",
"Invalid regex pattern found in: " + matchPatterns + ". " + e.getMessage());
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java
index c89f6164de..82d316dfa6 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java
@@ -61,6 +61,8 @@ public class IngestCommonPlugin extends Plugin implements IngestPlugin {
processors.put(SortProcessor.TYPE, new SortProcessor.Factory());
processors.put(GrokProcessor.TYPE, new GrokProcessor.Factory(builtinPatterns));
processors.put(ScriptProcessor.TYPE, new ScriptProcessor.Factory(parameters.scriptService));
+ processors.put(DotExpanderProcessor.TYPE, new DotExpanderProcessor.Factory());
+ processors.put(JsonProcessor.TYPE, new JsonProcessor.Factory());
return Collections.unmodifiableMap(processors);
}
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java
new file mode 100644
index 0000000000..024c3aef94
--- /dev/null
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import com.fasterxml.jackson.core.JsonParseException;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.ingest.AbstractProcessor;
+import org.elasticsearch.ingest.ConfigurationUtils;
+import org.elasticsearch.ingest.IngestDocument;
+import org.elasticsearch.ingest.Processor;
+
+import java.util.Map;
+
+/**
+ * Processor that parses a string-valued field as JSON and stores
+ * the resulting map of maps in the document.
+ */
+public final class JsonProcessor extends AbstractProcessor {
+
+ public static final String TYPE = "json";
+
+ private final String field;
+ private final String targetField;
+
+ JsonProcessor(String tag, String field, String targetField) {
+ super(tag);
+ this.field = field;
+ this.targetField = targetField;
+ }
+
+ public String getField() {
+ return field;
+ }
+
+ public String getTargetField() {
+ return targetField;
+ }
+
+ @Override
+ public void execute(IngestDocument document) throws Exception {
+ String stringValue = document.getFieldValue(field, String.class);
+ try {
+ Map<String, Object> mapValue = JsonXContent.jsonXContent.createParser(stringValue).map();
+ document.setFieldValue(targetField, mapValue);
+ } catch (JsonParseException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+
+ @Override
+ public String getType() {
+ return TYPE;
+ }
+
+ public static final class Factory implements Processor.Factory {
+ @Override
+ public JsonProcessor create(Map<String, Processor.Factory> registry, String processorTag,
+ Map<String, Object> config) throws Exception {
+ String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+ String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", field);
+ return new JsonProcessor(processorTag, field, targetField);
+ }
+ }
+}
+
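A small sketch of the transformation the new json processor applies, using plain Jackson in place of JsonXContent (illustrative; the field names are made up):

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;

class JsonParseSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> doc = new HashMap<>();
        doc.put("payload", "{\"user\":{\"id\":7}}");
        // parse the string field and store the result under the target field;
        // target_field defaults to the source field in the real factory
        Map<?, ?> parsed = new ObjectMapper().readValue((String) doc.get("payload"), Map.class);
        doc.put("payload", parsed);
        System.out.println(doc); // {payload={user={id=7}}}
    }
}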
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java
index a0ae8e1315..14c34b57b6 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java
@@ -30,8 +30,8 @@ public final class LowercaseProcessor extends AbstractStringProcessor {
public static final String TYPE = "lowercase";
- LowercaseProcessor(String processorTag, String field) {
- super(processorTag, field);
+ LowercaseProcessor(String processorTag, String field, boolean ignoreMissing) {
+ super(processorTag, field, ignoreMissing);
}
@Override
@@ -51,8 +51,8 @@ public final class LowercaseProcessor extends AbstractStringProcessor {
}
@Override
- protected LowercaseProcessor newProcessor(String tag, String field) {
- return new LowercaseProcessor(tag, field);
+ protected LowercaseProcessor newProcessor(String tag, String field, boolean ignoreMissing) {
+ return new LowercaseProcessor(tag, field, ignoreMissing);
}
}
}
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java
index d6c655fd5c..90a6389d82 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java
@@ -35,11 +35,13 @@ public final class RenameProcessor extends AbstractProcessor {
private final String field;
private final String targetField;
+ private final boolean ignoreMissing;
- RenameProcessor(String tag, String field, String targetField) {
+ RenameProcessor(String tag, String field, String targetField, boolean ignoreMissing) {
super(tag);
this.field = field;
this.targetField = targetField;
+ this.ignoreMissing = ignoreMissing;
}
String getField() {
@@ -50,22 +52,34 @@ public final class RenameProcessor extends AbstractProcessor {
return targetField;
}
+ boolean isIgnoreMissing() {
+ return ignoreMissing;
+ }
+
@Override
public void execute(IngestDocument document) {
- if (document.hasField(field) == false) {
- throw new IllegalArgumentException("field [" + field + "] doesn't exist");
+ if (document.hasField(field, true) == false) {
+ if (ignoreMissing) {
+ return;
+ } else {
+ throw new IllegalArgumentException("field [" + field + "] doesn't exist");
+ }
}
- if (document.hasField(targetField)) {
+ // We fail here if the target field points to an array slot that is out of range.
+ // If we didn't check this up front, we would first remove the source field and then fail to set
+ // the target field, so on-failure processors would no longer see the value we tried to rename.
+ if (document.hasField(targetField, true)) {
throw new IllegalArgumentException("field [" + targetField + "] already exists");
}
- Object oldValue = document.getFieldValue(field, Object.class);
- document.setFieldValue(targetField, oldValue);
+ Object value = document.getFieldValue(field, Object.class);
+ document.removeField(field);
try {
- document.removeField(field);
+ document.setFieldValue(targetField, value);
} catch (Exception e) {
- //remove the new field if the removal of the old one failed
- document.removeField(targetField);
+ // setting the value back to the original field shouldn't fail as we just fetched it from that field:
+ document.setFieldValue(field, value);
throw e;
}
}
@@ -81,7 +95,8 @@ public final class RenameProcessor extends AbstractProcessor {
Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field");
- return new RenameProcessor(processorTag, field, targetField);
+ boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false);
+ return new RenameProcessor(processorTag, field, targetField, ignoreMissing);
}
}
}
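
The reordering above (remove first, then set, restoring on failure) can be sketched in isolation like this. Note this is plain Java for shape only: IngestDocument#setFieldValue can throw on an invalid target path, whereas a plain map put cannot.

import java.util.HashMap;
import java.util.Map;

class RenameSketch {
    static void rename(Map<String, Object> doc, String field, String targetField) {
        if (doc.containsKey(targetField)) {
            throw new IllegalArgumentException("field [" + targetField + "] already exists");
        }
        Object value = doc.remove(field); // remove before setting, so on-failure
                                          // handlers never see a half-renamed document
        try {
            doc.put(targetField, value);
        } catch (RuntimeException e) {
            doc.put(field, value); // put the value back; we just fetched it from this field
            throw e;
        }
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        doc.put("a", 1);
        rename(doc, "a", "b");
        System.out.println(doc); // {b=1}
    }
}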
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java
index bb8606feae..94f335cc12 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java
@@ -19,14 +19,12 @@
package org.elasticsearch.ingest.common;
-import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.common.Strings;
import org.elasticsearch.ingest.AbstractProcessor;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.Processor;
-import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
@@ -35,6 +33,7 @@ import org.elasticsearch.script.ScriptService;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException;
+import static org.elasticsearch.ingest.ConfigurationUtils.readOptionalMap;
import static org.elasticsearch.ingest.ConfigurationUtils.readOptionalStringProperty;
import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty;
import static org.elasticsearch.script.ScriptService.ScriptType.FILE;
@@ -60,10 +59,8 @@ public final class ScriptProcessor extends AbstractProcessor {
@Override
public void execute(IngestDocument document) {
- Map<String, Object> vars = new HashMap<>();
- vars.put("ctx", document.getSourceAndMetadata());
- CompiledScript compiledScript = scriptService.compile(script, ScriptContext.Standard.INGEST, emptyMap());
- ExecutableScript executableScript = scriptService.executable(compiledScript, vars);
+ ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.INGEST, emptyMap());
+ executableScript.setNextVar("ctx", document.getSourceAndMetadata());
executableScript.run();
}
@@ -87,6 +84,7 @@ public final class ScriptProcessor extends AbstractProcessor {
String inline = readOptionalStringProperty(TYPE, processorTag, config, "inline");
String file = readOptionalStringProperty(TYPE, processorTag, config, "file");
String id = readOptionalStringProperty(TYPE, processorTag, config, "id");
+ Map<String, ?> params = readOptionalMap(TYPE, processorTag, config, "params");
boolean containsNoScript = !hasLength(file) && !hasLength(id) && !hasLength(inline);
if (containsNoScript) {
@@ -99,13 +97,17 @@ public final class ScriptProcessor extends AbstractProcessor {
throw newConfigurationException(TYPE, processorTag, null, "Only one of [file], [id], or [inline] may be configured");
}
+ if (params == null) {
+ params = emptyMap();
+ }
+
final Script script;
if (Strings.hasLength(file)) {
- script = new Script(file, FILE, lang, emptyMap());
+ script = new Script(file, FILE, lang, params);
} else if (Strings.hasLength(inline)) {
- script = new Script(inline, INLINE, lang, emptyMap());
+ script = new Script(inline, INLINE, lang, params);
} else if (Strings.hasLength(id)) {
- script = new Script(id, STORED, lang, emptyMap());
+ script = new Script(id, STORED, lang, params);
} else {
throw newConfigurationException(TYPE, processorTag, null, "Could not initialize script");
}
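
The factory's constraints can be summarized in a standalone sketch (plain Java, illustrative; the first error message below is our own wording, not the upstream one): params default to an empty map, and exactly one of file, id, or inline must be set.

import java.util.Collections;
import java.util.Map;

class ScriptConfigSketch {
    static Map<String, ?> normalizeParams(Map<String, ?> params) {
        if (params == null) {
            return Collections.emptyMap();
        }
        return params;
    }

    static void requireExactlyOneSource(String file, String id, String inline) {
        int sources = 0;
        if (file != null && !file.isEmpty()) sources++;
        if (id != null && !id.isEmpty()) sources++;
        if (inline != null && !inline.isEmpty()) sources++;
        if (sources == 0) {
            throw new IllegalArgumentException("one of [file], [id], or [inline] is required");
        }
        if (sources > 1) {
            throw new IllegalArgumentException("Only one of [file], [id], or [inline] may be configured");
        }
    }

    public static void main(String[] args) {
        System.out.println(normalizeParams(null)); // {}
        requireExactlyOneSource(null, null, "ctx.foo = 'bar'"); // passes silently
    }
}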
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java
index e852f887da..4ab84f88a2 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java
@@ -27,8 +27,8 @@ public final class TrimProcessor extends AbstractStringProcessor {
public static final String TYPE = "trim";
- TrimProcessor(String processorTag, String field) {
- super(processorTag, field);
+ TrimProcessor(String processorTag, String field, boolean ignoreMissing) {
+ super(processorTag, field, ignoreMissing);
}
@Override
@@ -48,8 +48,8 @@ public final class TrimProcessor extends AbstractStringProcessor {
}
@Override
- protected TrimProcessor newProcessor(String tag, String field) {
- return new TrimProcessor(tag, field);
+ protected TrimProcessor newProcessor(String tag, String field, boolean ignoreMissing) {
+ return new TrimProcessor(tag, field, ignoreMissing);
}
}
}
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java
index 5585a130ea..61491525cc 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java
@@ -29,8 +29,8 @@ public final class UppercaseProcessor extends AbstractStringProcessor {
public static final String TYPE = "uppercase";
- UppercaseProcessor(String processorTag, String field) {
- super(processorTag, field);
+ UppercaseProcessor(String processorTag, String field, boolean ignoreMissing) {
+ super(processorTag, field, ignoreMissing);
}
@Override
@@ -50,8 +50,8 @@ public final class UppercaseProcessor extends AbstractStringProcessor {
}
@Override
- protected UppercaseProcessor newProcessor(String tag, String field) {
- return new UppercaseProcessor(tag, field);
+ protected UppercaseProcessor newProcessor(String tag, String field, boolean ignoreMissing) {
+ return new UppercaseProcessor(tag, field, ignoreMissing);
}
}
}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java
index e987510f25..1c83a72584 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java
@@ -27,12 +27,13 @@ import org.elasticsearch.test.ESTestCase;
import java.util.Collections;
import java.util.HashMap;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public abstract class AbstractStringProcessorTestCase extends ESTestCase {
- protected abstract AbstractStringProcessor newProcessor(String field);
+ protected abstract AbstractStringProcessor newProcessor(String field, boolean ignoreMissing);
protected String modifyInput(String input) {
return input;
@@ -44,45 +45,60 @@ public abstract class AbstractStringProcessorTestCase extends ESTestCase {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
String fieldValue = RandomDocumentPicks.randomString(random());
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, modifyInput(fieldValue));
- Processor processor = newProcessor(fieldName);
+ Processor processor = newProcessor(fieldName, randomBoolean());
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(expectedResult(fieldValue)));
}
public void testFieldNotFound() throws Exception {
String fieldName = RandomDocumentPicks.randomFieldName(random());
- Processor processor = newProcessor(fieldName);
+ Processor processor = newProcessor(fieldName, false);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
- try {
- processor.execute(ingestDocument);
- fail("processor should have failed");
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), containsString("not present as part of path [" + fieldName + "]"));
- }
+ Exception e = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
+ assertThat(e.getMessage(), containsString("not present as part of path [" + fieldName + "]"));
+ }
+
+ public void testFieldNotFoundWithIgnoreMissing() throws Exception {
+ String fieldName = RandomDocumentPicks.randomFieldName(random());
+ Processor processor = newProcessor(fieldName, true);
+ IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
+ processor.execute(ingestDocument);
+ assertIngestDocument(originalIngestDocument, ingestDocument);
}
public void testNullValue() throws Exception {
- Processor processor = newProcessor("field");
+ Processor processor = newProcessor("field", false);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null));
- try {
- processor.execute(ingestDocument);
- fail("processor should have failed");
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("field [field] is null, cannot process it."));
- }
+ Exception e = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
+ assertThat(e.getMessage(), equalTo("field [field] is null, cannot process it."));
+ }
+
+ public void testNullValueWithIgnoreMissing() throws Exception {
+ Processor processor = newProcessor("field", true);
+ IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null));
+ IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
+ processor.execute(ingestDocument);
+ assertIngestDocument(originalIngestDocument, ingestDocument);
}
public void testNonStringValue() throws Exception {
String fieldName = RandomDocumentPicks.randomFieldName(random());
- Processor processor = newProcessor(fieldName);
+ Processor processor = newProcessor(fieldName, false);
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ ingestDocument.setFieldValue(fieldName, randomInt());
+ Exception e = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
+ assertThat(e.getMessage(), equalTo("field [" + fieldName +
+ "] of type [java.lang.Integer] cannot be cast to [java.lang.String]"));
+ }
+
+ public void testNonStringValueWithIgnoreMissing() throws Exception {
+ String fieldName = RandomDocumentPicks.randomFieldName(random());
+ Processor processor = newProcessor(fieldName, true);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
ingestDocument.setFieldValue(fieldName, randomInt());
- try {
- processor.execute(ingestDocument);
- fail("processor should have failed");
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("field [" + fieldName +
- "] of type [java.lang.Integer] cannot be cast to [java.lang.String]"));
- }
+ Exception e = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
+ assertThat(e.getMessage(), equalTo("field [" + fieldName +
+ "] of type [java.lang.Integer] cannot be cast to [java.lang.String]"));
}
}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java
index bc04053f90..afdb7f7841 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java
@@ -27,6 +27,7 @@ import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
public class ConvertProcessorFactoryTests extends ESTestCase {
@@ -43,6 +44,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase {
assertThat(convertProcessor.getField(), equalTo("field1"));
assertThat(convertProcessor.getTargetField(), equalTo("field1"));
assertThat(convertProcessor.getConvertType(), equalTo(type));
+ assertThat(convertProcessor.isIgnoreMissing(), is(false));
}
public void testCreateUnsupportedType() throws Exception {
@@ -100,5 +102,22 @@ public class ConvertProcessorFactoryTests extends ESTestCase {
assertThat(convertProcessor.getField(), equalTo("field1"));
assertThat(convertProcessor.getTargetField(), equalTo("field2"));
assertThat(convertProcessor.getConvertType(), equalTo(type));
+ assertThat(convertProcessor.isIgnoreMissing(), is(false));
+ }
+
+ public void testCreateWithIgnoreMissing() throws Exception {
+ ConvertProcessor.Factory factory = new ConvertProcessor.Factory();
+ Map<String, Object> config = new HashMap<>();
+ ConvertProcessor.Type type = randomFrom(ConvertProcessor.Type.values());
+ config.put("field", "field1");
+ config.put("type", type.toString());
+ config.put("ignore_missing", true);
+ String processorTag = randomAsciiOfLength(10);
+ ConvertProcessor convertProcessor = factory.create(null, processorTag, config);
+ assertThat(convertProcessor.getTag(), equalTo(processorTag));
+ assertThat(convertProcessor.getField(), equalTo("field1"));
+ assertThat(convertProcessor.getTargetField(), equalTo("field1"));
+ assertThat(convertProcessor.getConvertType(), equalTo(type));
+ assertThat(convertProcessor.isIgnoreMissing(), is(true));
}
}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java
index 02f859ecfa..f8a8a24286 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java
@@ -31,6 +31,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.elasticsearch.ingest.common.ConvertProcessor.Type;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
@@ -42,7 +43,7 @@ public class ConvertProcessorTests extends ESTestCase {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int randomInt = randomInt();
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomInt);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(randomInt));
}
@@ -58,7 +59,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomInt);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}
@@ -69,7 +70,7 @@ public class ConvertProcessorTests extends ESTestCase {
String value = "string-" + randomAsciiOfLengthBetween(1, 10);
ingestDocument.setFieldValue(fieldName, value);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER, false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -85,7 +86,7 @@ public class ConvertProcessorTests extends ESTestCase {
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomFloat);
expectedResult.put(fieldName, randomFloat);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, Float.class), equalTo(randomFloat));
}
@@ -101,7 +102,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomFloat);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}
@@ -112,7 +113,7 @@ public class ConvertProcessorTests extends ESTestCase {
String value = "string-" + randomAsciiOfLengthBetween(1, 10);
ingestDocument.setFieldValue(fieldName, value);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT, false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -130,7 +131,7 @@ public class ConvertProcessorTests extends ESTestCase {
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, booleanString);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, Boolean.class), equalTo(randomBoolean));
}
@@ -150,7 +151,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomBoolean);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}
@@ -167,7 +168,7 @@ public class ConvertProcessorTests extends ESTestCase {
}
ingestDocument.setFieldValue(fieldName, fieldValue);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN, false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -201,7 +202,7 @@ public class ConvertProcessorTests extends ESTestCase {
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(expectedFieldValue));
}
@@ -237,7 +238,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomValueString);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}
@@ -246,7 +247,7 @@ public class ConvertProcessorTests extends ESTestCase {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
String fieldName = RandomDocumentPicks.randomFieldName(random());
Type type = randomFrom(Type.values());
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, type);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, type, false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -258,7 +259,7 @@ public class ConvertProcessorTests extends ESTestCase {
public void testConvertNullField() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null));
Type type = randomFrom(Type.values());
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", type);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", type, false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -267,6 +268,25 @@ public class ConvertProcessorTests extends ESTestCase {
}
}
+ public void testConvertNonExistingFieldWithIgnoreMissing() throws Exception {
+ IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
+ String fieldName = RandomDocumentPicks.randomFieldName(random());
+ Type type = randomFrom(Type.values());
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, type, true);
+ processor.execute(ingestDocument);
+ assertIngestDocument(originalIngestDocument, ingestDocument);
+ }
+
+ public void testConvertNullFieldWithIgnoreMissing() throws Exception {
+ IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null));
+ IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
+ Type type = randomFrom(Type.values());
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", type, true);
+ processor.execute(ingestDocument);
+ assertIngestDocument(originalIngestDocument, ingestDocument);
+ }
+
public void testAutoConvertNotString() throws Exception {
Object randomValue;
switch(randomIntBetween(0, 2)) {
@@ -286,7 +306,7 @@ public class ConvertProcessorTests extends ESTestCase {
throw new UnsupportedOperationException();
}
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomValue));
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO, false);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, sameInstance(randomValue));
@@ -295,7 +315,7 @@ public class ConvertProcessorTests extends ESTestCase {
public void testAutoConvertStringNotMatched() throws Exception {
String value = "notAnIntFloatOrBool";
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", value));
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO, false);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, sameInstance(value));
@@ -306,7 +326,7 @@ public class ConvertProcessorTests extends ESTestCase {
String booleanString = Boolean.toString(randomBoolean);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(),
Collections.singletonMap("field", booleanString));
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO, false);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, equalTo(randomBoolean));
@@ -316,7 +336,7 @@ public class ConvertProcessorTests extends ESTestCase {
int randomInt = randomInt();
String randomString = Integer.toString(randomInt);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString));
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO, false);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, equalTo(randomInt));
@@ -326,7 +346,7 @@ public class ConvertProcessorTests extends ESTestCase {
float randomFloat = randomFloat();
String randomString = Float.toString(randomFloat);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString));
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO, false);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, equalTo(randomFloat));
@@ -337,10 +357,9 @@ public class ConvertProcessorTests extends ESTestCase {
int randomInt = randomInt();
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, String.valueOf(randomInt));
String targetField = fieldName + randomAsciiOfLength(5);
- Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, targetField, Type.INTEGER);
+ Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, targetField, Type.INTEGER, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt)));
assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt));
-
}
}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java
new file mode 100644
index 0000000000..be0695924e
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DotExpanderProcessorFactoryTests extends ESTestCase {
+
+ public void testCreate() throws Exception {
+ DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", "_field.field");
+ config.put("path", "_path");
+ DotExpanderProcessor processor = (DotExpanderProcessor) factory.create(null, "_tag", config);
+ assertThat(processor.getField(), equalTo("_field.field"));
+ assertThat(processor.getPath(), equalTo("_path"));
+
+ config = new HashMap<>();
+ config.put("field", "_field.field");
+ processor = (DotExpanderProcessor) factory.create(null, "_tag", config);
+ assertThat(processor.getField(), equalTo("_field.field"));
+ assertThat(processor.getPath(), nullValue());
+ }
+
+ public void testValidFields() throws Exception {
+ DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+
+ String[] fields = new String[] {"a.b", "a.b.c", "a.b.c.d", "ab.cd"};
+ for (String field : fields) {
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", field);
+ config.put("path", "_path");
+ DotExpanderProcessor processor = (DotExpanderProcessor) factory.create(null, "_tag", config);
+ assertThat(processor.getField(), equalTo(field));
+ assertThat(processor.getPath(), equalTo("_path"));
+ }
+ }
+
+ public void testCreate_fieldMissing() throws Exception {
+ DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+
+ Map<String, Object> config = new HashMap<>();
+ config.put("path", "_path");
+ Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config));
+ assertThat(e.getMessage(), equalTo("[field] required property is missing"));
+ }
+
+ public void testCreate_invalidFields() throws Exception {
+ DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
+ String[] fields = new String[] {"a", "abc"};
+ for (String field : fields) {
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", field);
+ Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config));
+ assertThat(e.getMessage(), equalTo("[field] field does not contain a dot"));
+ }
+
+ fields = new String[] {".a", "a.", "."};
+ for (String field : fields) {
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", field);
+ Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config));
+ assertThat(e.getMessage(), equalTo("[field] Field can't start or end with a dot"));
+ }
+
+ fields = new String[] {"a..b", "a...b", "a.b..c", "abc.def..hij"};
+ for (String field : fields) {
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", field);
+ Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config));
+ assertThat(e.getMessage(), equalTo("[field] No space between dots"));
+ }
+ }
+
+}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java
new file mode 100644
index 0000000000..199c475a8f
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import org.elasticsearch.ingest.IngestDocument;
+import org.elasticsearch.ingest.Processor;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class DotExpanderProcessorTests extends ESTestCase {
+
+ public void testEscapeFields() throws Exception {
+ Map<String, Object> source = new HashMap<>();
+ source.put("foo.bar", "baz1");
+ IngestDocument document = new IngestDocument(source, Collections.emptyMap());
+ DotExpanderProcessor processor = new DotExpanderProcessor("_tag", null, "foo.bar");
+ processor.execute(document);
+ assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz1"));
+
+ source = new HashMap<>();
+ source.put("foo.bar.baz", "value");
+ document = new IngestDocument(source, Collections.emptyMap());
+ processor = new DotExpanderProcessor("_tag", null, "foo.bar.baz");
+ processor.execute(document);
+ assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("value"));
+
+ source = new HashMap<>();
+ source.put("foo.bar", "baz1");
+ source.put("foo", new HashMap<>(Collections.singletonMap("bar", "baz2")));
+ document = new IngestDocument(source, Collections.emptyMap());
+ processor = new DotExpanderProcessor("_tag", null, "foo.bar");
+ processor.execute(document);
+ assertThat(document.getSourceAndMetadata().size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar", List.class).size(), equalTo(2));
+ assertThat(document.getFieldValue("foo.bar.0", String.class), equalTo("baz2"));
+ assertThat(document.getFieldValue("foo.bar.1", String.class), equalTo("baz1"));
+
+ source = new HashMap<>();
+ source.put("foo.bar", "2");
+ source.put("foo", new HashMap<>(Collections.singletonMap("bar", 1)));
+ document = new IngestDocument(source, Collections.emptyMap());
+ processor = new DotExpanderProcessor("_tag", null, "foo.bar");
+ processor.execute(document);
+ assertThat(document.getSourceAndMetadata().size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar", List.class).size(), equalTo(2));
+ assertThat(document.getFieldValue("foo.bar.0", Integer.class), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar.1", String.class), equalTo("2"));
+ }
+
+ public void testEscapeFields_valueField() throws Exception {
+ Map<String, Object> source = new HashMap<>();
+ source.put("foo.bar", "baz1");
+ source.put("foo", "baz2");
+ IngestDocument document1 = new IngestDocument(source, Collections.emptyMap());
+ Processor processor1 = new DotExpanderProcessor("_tag", null, "foo.bar");
+ // foo already exists and is a leaf field and therefore can't be replaced by a map field:
+ Exception e = expectThrows(IllegalArgumentException.class, () -> processor1.execute(document1));
+ assertThat(e.getMessage(), equalTo("cannot expand [foo.bar], because [foo] is not an object field, but a value field"));
+
+ // because foo is not an object field but a value field, the `foo.bar` field can't be expanded
+ // into [foo].[bar]; foo must first be renamed to `foo.bar` before it can be expanded:
+ IngestDocument document = new IngestDocument(source, Collections.emptyMap());
+ Processor processor = new RenameProcessor("_tag", "foo", "foo.bar", false);
+ processor.execute(document);
+ processor = new DotExpanderProcessor("_tag", null, "foo.bar");
+ processor.execute(document);
+ assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar.0", String.class), equalTo("baz2"));
+ assertThat(document.getFieldValue("foo.bar.1", String.class), equalTo("baz1"));
+
+ source = new HashMap<>();
+ source.put("foo.bar", "baz1");
+ document = new IngestDocument(source, Collections.emptyMap());
+ processor = new DotExpanderProcessor("_tag", null, "foo.bar");
+ processor.execute(document);
+ assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz1"));
+
+ source = new HashMap<>();
+ source.put("foo.bar.baz", "baz1");
+ source.put("foo", new HashMap<>(Collections.singletonMap("bar", new HashMap<>())));
+ document = new IngestDocument(source, Collections.emptyMap());
+ processor = new DotExpanderProcessor("_tag", null, "foo.bar.baz");
+ processor.execute(document);
+ assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("baz1"));
+
+ source = new HashMap<>();
+ source.put("foo.bar.baz", "baz1");
+ source.put("foo", new HashMap<>(Collections.singletonMap("bar", "baz2")));
+ IngestDocument document2 = new IngestDocument(source, Collections.emptyMap());
+ Processor processor2 = new DotExpanderProcessor("_tag", null, "foo.bar.baz");
+ e = expectThrows(IllegalArgumentException.class, () -> processor2.execute(document2));
+ assertThat(e.getMessage(), equalTo("cannot expand [foo.bar.baz], because [foo.bar] is not an object field, but a value field"));
+ }
+
+ public void testEscapeFields_path() throws Exception {
+ Map<String, Object> source = new HashMap<>();
+ source.put("foo", new HashMap<>(Collections.singletonMap("bar.baz", "value")));
+ IngestDocument document = new IngestDocument(source, Collections.emptyMap());
+ DotExpanderProcessor processor = new DotExpanderProcessor("_tag", "foo", "bar.baz");
+ processor.execute(document);
+ assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("value"));
+
+ source = new HashMap<>();
+ source.put("field", new HashMap<>(Collections.singletonMap("foo.bar.baz", "value")));
+ document = new IngestDocument(source, Collections.emptyMap());
+ processor = new DotExpanderProcessor("_tag", "field", "foo.bar.baz");
+ processor.execute(document);
+ assertThat(document.getFieldValue("field.foo", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("field.foo.bar", Map.class).size(), equalTo(1));
+ assertThat(document.getFieldValue("field.foo.bar.baz", String.class), equalTo("value"));
+ }
+
+}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java
index 7a7b59bad2..edbc001569 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java
@@ -49,7 +49,7 @@ public class ForEachProcessorTests extends ESTestCase {
);
ForEachProcessor processor = new ForEachProcessor(
- "_tag", "values", new UppercaseProcessor("_tag", "_ingest._value")
+ "_tag", "values", new UppercaseProcessor("_tag", "_ingest._value", false)
);
processor.execute(ingestDocument);
@@ -197,7 +197,7 @@ public class ForEachProcessorTests extends ESTestCase {
ForEachProcessor processor = new ForEachProcessor(
"_tag", "values", new CompoundProcessor(false,
- Collections.singletonList(new UppercaseProcessor("_tag_upper", "_ingest._value")),
+ Collections.singletonList(new UppercaseProcessor("_tag_upper", "_ingest._value", false)),
Collections.singletonList(new AppendProcessor("_tag",
ts.compile("errors"), (model) -> (Collections.singletonList("added"))))
));
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java
index 20537d2ced..37aa8adca9 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java
@@ -27,6 +27,7 @@ import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
public class GrokProcessorFactoryTests extends ESTestCase {
@@ -42,6 +43,22 @@ public class GrokProcessorFactoryTests extends ESTestCase {
assertThat(processor.getTag(), equalTo(processorTag));
assertThat(processor.getMatchField(), equalTo("_field"));
assertThat(processor.getGrok(), notNullValue());
+ assertThat(processor.isIgnoreMissing(), is(false));
+ }
+
+ public void testBuildWithIgnoreMissing() throws Exception {
+ GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap());
+
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", "_field");
+ config.put("patterns", Collections.singletonList("(?<foo>\\w+)"));
+ config.put("ignore_missing", true);
+ String processorTag = randomAsciiOfLength(10);
+ GrokProcessor processor = factory.create(null, processorTag, config);
+ assertThat(processor.getTag(), equalTo(processorTag));
+ assertThat(processor.getMatchField(), equalTo("_field"));
+ assertThat(processor.getGrok(), notNullValue());
+ assertThat(processor.isIgnoreMissing(), is(true));
}
public void testBuildMissingField() throws Exception {
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java
index f108e80e79..ce1507d8b6 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java
@@ -28,6 +28,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.equalTo;
@@ -38,7 +39,7 @@ public class GrokProcessorTests extends ESTestCase {
IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
doc.setFieldValue(fieldName, "1");
GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
- Collections.singletonList("%{ONE:one}"), fieldName);
+ Collections.singletonList("%{ONE:one}"), fieldName, false, false);
processor.execute(doc);
assertThat(doc.getFieldValue("one", String.class), equalTo("1"));
}
@@ -48,7 +49,7 @@ public class GrokProcessorTests extends ESTestCase {
IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
doc.setFieldValue(fieldName, "23");
GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
- Collections.singletonList("%{ONE:one}"), fieldName);
+ Collections.singletonList("%{ONE:one}"), fieldName, false, false);
Exception e = expectThrows(Exception.class, () -> processor.execute(doc));
assertThat(e.getMessage(), equalTo("Provided Grok expressions do not match field value: [23]"));
}
@@ -59,17 +60,48 @@ public class GrokProcessorTests extends ESTestCase {
originalDoc.setFieldValue(fieldName, fieldName);
IngestDocument doc = new IngestDocument(originalDoc);
GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.emptyMap(),
- Collections.singletonList(fieldName), fieldName);
+ Collections.singletonList(fieldName), fieldName, false, false);
processor.execute(doc);
assertThat(doc, equalTo(originalDoc));
}
+ public void testNullField() {
+ String fieldName = RandomDocumentPicks.randomFieldName(random());
+ IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ doc.setFieldValue(fieldName, null);
+ GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
+ Collections.singletonList("%{ONE:one}"), fieldName, false, false);
+ Exception e = expectThrows(Exception.class, () -> processor.execute(doc));
+ assertThat(e.getMessage(), equalTo("field [" + fieldName + "] is null, cannot process it."));
+ }
+
+ public void testNullFieldWithIgnoreMissing() throws Exception {
+ String fieldName = RandomDocumentPicks.randomFieldName(random());
+ IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ originalIngestDocument.setFieldValue(fieldName, null);
+ IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
+ GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
+ Collections.singletonList("%{ONE:one}"), fieldName, false, true);
+ processor.execute(ingestDocument);
+ assertIngestDocument(originalIngestDocument, ingestDocument);
+ }
+
public void testNotStringField() {
String fieldName = RandomDocumentPicks.randomFieldName(random());
IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
doc.setFieldValue(fieldName, 1);
GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
- Collections.singletonList("%{ONE:one}"), fieldName);
+ Collections.singletonList("%{ONE:one}"), fieldName, false, false);
+ Exception e = expectThrows(Exception.class, () -> processor.execute(doc));
+ assertThat(e.getMessage(), equalTo("field [" + fieldName + "] of type [java.lang.Integer] cannot be cast to [java.lang.String]"));
+ }
+
+ public void testNotStringFieldWithIgnoreMissing() {
+ String fieldName = RandomDocumentPicks.randomFieldName(random());
+ IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ doc.setFieldValue(fieldName, 1);
+ GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
+ Collections.singletonList("%{ONE:one}"), fieldName, false, true);
Exception e = expectThrows(Exception.class, () -> processor.execute(doc));
assertThat(e.getMessage(), equalTo("field [" + fieldName + "] of type [java.lang.Integer] cannot be cast to [java.lang.String]"));
}
@@ -78,11 +110,21 @@ public class GrokProcessorTests extends ESTestCase {
String fieldName = "foo.bar";
IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
- Collections.singletonList("%{ONE:one}"), fieldName);
+ Collections.singletonList("%{ONE:one}"), fieldName, false, false);
Exception e = expectThrows(Exception.class, () -> processor.execute(doc));
assertThat(e.getMessage(), equalTo("field [foo] not present as part of path [foo.bar]"));
}
+ public void testMissingFieldWithIgnoreMissing() throws Exception {
+ String fieldName = "foo.bar";
+ IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
+ GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), Collections.singletonMap("ONE", "1"),
+ Collections.singletonList("%{ONE:one}"), fieldName, false, true);
+ processor.execute(ingestDocument);
+ assertIngestDocument(originalIngestDocument, ingestDocument);
+ }
+
public void testMultiplePatternsWithMatchReturn() throws Exception {
String fieldName = RandomDocumentPicks.randomFieldName(random());
IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
@@ -92,7 +134,7 @@ public class GrokProcessorTests extends ESTestCase {
patternBank.put("TWO", "2");
patternBank.put("THREE", "3");
GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), patternBank,
- Arrays.asList("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), fieldName);
+ Arrays.asList("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), fieldName, false, false);
processor.execute(doc);
assertThat(doc.hasField("one"), equalTo(false));
assertThat(doc.getFieldValue("two", String.class), equalTo("2"));
@@ -108,7 +150,7 @@ public class GrokProcessorTests extends ESTestCase {
patternBank.put("TWO", "2");
patternBank.put("THREE", "3");
GrokProcessor processor = new GrokProcessor(randomAsciiOfLength(10), patternBank,
- Arrays.asList("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), fieldName, true);
+ Arrays.asList("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), fieldName, true, false);
processor.execute(doc);
assertThat(doc.hasField("one"), equalTo(false));
assertThat(doc.getFieldValue("two", String.class), equalTo("2"));
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java
new file mode 100644
index 0000000000..6b935b8795
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.ingest.TestTemplateService;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class JsonProcessorFactoryTests extends ESTestCase {
+
+ private static final JsonProcessor.Factory FACTORY = new JsonProcessor.Factory();
+
+ public void testCreate() throws Exception {
+ String processorTag = randomAsciiOfLength(10);
+ String randomField = randomAsciiOfLength(10);
+ String randomTargetField = randomAsciiOfLength(5);
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", randomField);
+ config.put("target_field", randomTargetField);
+ JsonProcessor jsonProcessor = FACTORY.create(null, processorTag, config);
+ assertThat(jsonProcessor.getTag(), equalTo(processorTag));
+ assertThat(jsonProcessor.getField(), equalTo(randomField));
+ assertThat(jsonProcessor.getTargetField(), equalTo(randomTargetField));
+ }
+
+ public void testCreateWithDefaultTarget() throws Exception {
+ String processorTag = randomAsciiOfLength(10);
+ String randomField = randomAsciiOfLength(10);
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", randomField);
+ JsonProcessor jsonProcessor = FACTORY.create(null, processorTag, config);
+ assertThat(jsonProcessor.getTag(), equalTo(processorTag));
+ assertThat(jsonProcessor.getField(), equalTo(randomField));
+ assertThat(jsonProcessor.getTargetField(), equalTo(randomField));
+ }
+
+ public void testCreateWithMissingField() throws Exception {
+ Map<String, Object> config = new HashMap<>();
+ String processorTag = randomAsciiOfLength(10);
+ ElasticsearchException exception = expectThrows(ElasticsearchParseException.class,
+ () -> FACTORY.create(null, processorTag, config));
+ assertThat(exception.getMessage(), equalTo("[field] required property is missing"));
+ }
+}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java
new file mode 100644
index 0000000000..c62ebbb12a
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.common;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.ingest.IngestDocument;
+import org.elasticsearch.ingest.RandomDocumentPicks;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
+import static org.hamcrest.Matchers.equalTo;
+
+public class JsonProcessorTests extends ESTestCase {
+
+ @SuppressWarnings("unchecked")
+ public void testExecute() throws Exception {
+ String processorTag = randomAsciiOfLength(3);
+ String randomField = randomAsciiOfLength(3);
+ String randomTargetField = randomAsciiOfLength(2);
+ JsonProcessor jsonProcessor = new JsonProcessor(processorTag, randomField, randomTargetField);
+ Map<String, Object> document = new HashMap<>();
+
+ Map<String, Object> randomJsonMap = RandomDocumentPicks.randomSource(random());
+ XContentBuilder builder = JsonXContent.contentBuilder().map(randomJsonMap);
+ String randomJson = XContentHelper.convertToJson(builder.bytes(), false);
+ document.put(randomField, randomJson);
+
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+ jsonProcessor.execute(ingestDocument);
+ Map<String, Object> jsonified = ingestDocument.getFieldValue(randomTargetField, Map.class);
+ assertIngestDocument(ingestDocument.getFieldValue(randomTargetField, Object.class), jsonified);
+ }
+
+ public void testInvalidJson() {
+ JsonProcessor jsonProcessor = new JsonProcessor("tag", "field", "target_field");
+ Map<String, Object> document = new HashMap<>();
+ document.put("field", "invalid json");
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+
+ Exception exception = expectThrows(IllegalArgumentException.class, () -> jsonProcessor.execute(ingestDocument));
+ assertThat(exception.getMessage(), equalTo("com.fasterxml.jackson.core.JsonParseException: Unrecognized token" +
+ " 'invalid': was expecting ('true', 'false' or 'null')\n" +
+ " at [Source: invalid json; line: 1, column: 8]"));
+ }
+
+ public void testFieldMissing() {
+ JsonProcessor jsonProcessor = new JsonProcessor("tag", "field", "target_field");
+ Map<String, Object> document = new HashMap<>();
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+
+ Exception exception = expectThrows(IllegalArgumentException.class, () -> jsonProcessor.execute(ingestDocument));
+ assertThat(exception.getMessage(), equalTo("field [field] not present as part of path [field]"));
+ }
+}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java
index 4d98efc4bb..2c80071b80 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.is;
public class LowercaseProcessorFactoryTests extends ESTestCase {
@@ -37,6 +38,19 @@ public class LowercaseProcessorFactoryTests extends ESTestCase {
LowercaseProcessor uppercaseProcessor = (LowercaseProcessor)factory.create(null, processorTag, config);
assertThat(uppercaseProcessor.getTag(), equalTo(processorTag));
assertThat(uppercaseProcessor.getField(), equalTo("field1"));
+ assertThat(uppercaseProcessor.isIgnoreMissing(), is(false));
+ }
+
+ public void testCreateWithIgnoreMissing() throws Exception {
+ LowercaseProcessor.Factory factory = new LowercaseProcessor.Factory();
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", "field1");
+ config.put("ignore_missing", true);
+ String processorTag = randomAsciiOfLength(10);
+ LowercaseProcessor lowercaseProcessor = (LowercaseProcessor)factory.create(null, processorTag, config);
+ assertThat(lowercaseProcessor.getTag(), equalTo(processorTag));
+ assertThat(lowercaseProcessor.getField(), equalTo("field1"));
+ assertThat(lowercaseProcessor.isIgnoreMissing(), is(true));
}
public void testCreateMissingField() throws Exception {
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java
index 413b3f6465..560a949f15 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java
@@ -23,8 +23,8 @@ import java.util.Locale;
public class LowercaseProcessorTests extends AbstractStringProcessorTestCase {
@Override
- protected AbstractStringProcessor newProcessor(String field) {
- return new LowercaseProcessor(randomAsciiOfLength(10), field);
+ protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing) {
+ return new LowercaseProcessor(randomAsciiOfLength(10), field, ignoreMissing);
}
@Override
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java
index 68f28cb30d..b631b78bf0 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java
@@ -39,6 +39,21 @@ public class RenameProcessorFactoryTests extends ESTestCase {
assertThat(renameProcessor.getTag(), equalTo(processorTag));
assertThat(renameProcessor.getField(), equalTo("old_field"));
assertThat(renameProcessor.getTargetField(), equalTo("new_field"));
+ assertThat(renameProcessor.isIgnoreMissing(), equalTo(false));
+ }
+
+ public void testCreateWithIgnoreMissing() throws Exception {
+ RenameProcessor.Factory factory = new RenameProcessor.Factory();
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", "old_field");
+ config.put("target_field", "new_field");
+ config.put("ignore_missing", true);
+ String processorTag = randomAsciiOfLength(10);
+ RenameProcessor renameProcessor = factory.create(null, processorTag, config);
+ assertThat(renameProcessor.getTag(), equalTo(processorTag));
+ assertThat(renameProcessor.getField(), equalTo("old_field"));
+ assertThat(renameProcessor.getTargetField(), equalTo("new_field"));
+ assertThat(renameProcessor.isIgnoreMissing(), equalTo(true));
}
public void testCreateNoFieldPresent() throws Exception {
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java
index dc4f732879..9fae812822 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java
@@ -30,6 +30,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.nullValue;
@@ -44,7 +45,7 @@ public class RenameProcessorTests extends ESTestCase {
do {
newFieldName = RandomDocumentPicks.randomFieldName(random());
} while (RandomDocumentPicks.canAddField(newFieldName, ingestDocument) == false || newFieldName.equals(fieldName));
- Processor processor = new RenameProcessor(randomAsciiOfLength(10), fieldName, newFieldName);
+ Processor processor = new RenameProcessor(randomAsciiOfLength(10), fieldName, newFieldName, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(newFieldName, Object.class), equalTo(fieldValue));
}
@@ -62,7 +63,7 @@ public class RenameProcessorTests extends ESTestCase {
document.put("one", one);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
- Processor processor = new RenameProcessor(randomAsciiOfLength(10), "list.0", "item");
+ Processor processor = new RenameProcessor(randomAsciiOfLength(10), "list.0", "item", false);
processor.execute(ingestDocument);
Object actualObject = ingestDocument.getSourceAndMetadata().get("list");
assertThat(actualObject, instanceOf(List.class));
@@ -75,7 +76,7 @@ public class RenameProcessorTests extends ESTestCase {
assertThat(actualObject, instanceOf(String.class));
assertThat(actualObject, equalTo("item1"));
- processor = new RenameProcessor(randomAsciiOfLength(10), "list.0", "list.3");
+ processor = new RenameProcessor(randomAsciiOfLength(10), "list.0", "list.3", false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -90,7 +91,8 @@ public class RenameProcessorTests extends ESTestCase {
public void testRenameNonExistingField() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
String fieldName = RandomDocumentPicks.randomFieldName(random());
- Processor processor = new RenameProcessor(randomAsciiOfLength(10), fieldName, RandomDocumentPicks.randomFieldName(random()));
+ Processor processor = new RenameProcessor(randomAsciiOfLength(10), fieldName,
+ RandomDocumentPicks.randomFieldName(random()), false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -99,11 +101,21 @@ public class RenameProcessorTests extends ESTestCase {
}
}
+ public void testRenameNonExistingFieldWithIgnoreMissing() throws Exception {
+ IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+ IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
+ String fieldName = RandomDocumentPicks.randomFieldName(random());
+ Processor processor = new RenameProcessor(randomAsciiOfLength(10), fieldName,
+ RandomDocumentPicks.randomFieldName(random()), true);
+ processor.execute(ingestDocument);
+ assertIngestDocument(originalIngestDocument, ingestDocument);
+ }
+
public void testRenameNewFieldAlreadyExists() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
String fieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument);
Processor processor = new RenameProcessor(randomAsciiOfLength(10), RandomDocumentPicks.randomExistingFieldName(
- random(), ingestDocument), fieldName);
+ random(), ingestDocument), fieldName, false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -117,7 +129,7 @@ public class RenameProcessorTests extends ESTestCase {
String fieldName = RandomDocumentPicks.randomFieldName(random());
ingestDocument.setFieldValue(fieldName, null);
String newFieldName = RandomDocumentPicks.randomFieldName(random());
- Processor processor = new RenameProcessor(randomAsciiOfLength(10), fieldName, newFieldName);
+ Processor processor = new RenameProcessor(randomAsciiOfLength(10), fieldName, newFieldName, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.hasField(fieldName), equalTo(false));
assertThat(ingestDocument.hasField(newFieldName), equalTo(true));
@@ -137,7 +149,7 @@ public class RenameProcessorTests extends ESTestCase {
source.put("list", Collections.singletonList("item"));
IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap());
- Processor processor = new RenameProcessor(randomAsciiOfLength(10), "list", "new_field");
+ Processor processor = new RenameProcessor(randomAsciiOfLength(10), "list", "new_field", false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -161,7 +173,7 @@ public class RenameProcessorTests extends ESTestCase {
source.put("list", Collections.singletonList("item"));
IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap());
- Processor processor = new RenameProcessor(randomAsciiOfLength(10), "list", "new_field");
+ Processor processor = new RenameProcessor(randomAsciiOfLength(10), "list", "new_field", false);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -171,4 +183,27 @@ public class RenameProcessorTests extends ESTestCase {
assertThat(ingestDocument.getSourceAndMetadata().containsKey("new_field"), equalTo(false));
}
}
+
+ public void testRenameLeafIntoBranch() throws Exception {
+ Map<String, Object> source = new HashMap<>();
+ source.put("foo", "bar");
+ IngestDocument ingestDocument = new IngestDocument(source, Collections.emptyMap());
+ Processor processor1 = new RenameProcessor(randomAsciiOfLength(10), "foo", "foo.bar", false);
+ processor1.execute(ingestDocument);
+ assertThat(ingestDocument.getFieldValue("foo", Map.class), equalTo(Collections.singletonMap("bar", "bar")));
+ assertThat(ingestDocument.getFieldValue("foo.bar", String.class), equalTo("bar"));
+
+ Processor processor2 = new RenameProcessor(randomAsciiOfLength(10), "foo.bar", "foo.bar.baz", false);
+ processor2.execute(ingestDocument);
+ assertThat(ingestDocument.getFieldValue("foo", Map.class), equalTo(Collections.singletonMap("bar",
+ Collections.singletonMap("baz", "bar"))));
+ assertThat(ingestDocument.getFieldValue("foo.bar", Map.class), equalTo(Collections.singletonMap("baz", "bar")));
+ assertThat(ingestDocument.getFieldValue("foo.bar.baz", String.class), equalTo("bar"));
+
+ // for fun, let's try to restore it (which we don't allow today)
+ Processor processor3 = new RenameProcessor(randomAsciiOfLength(10), "foo.bar.baz", "foo", false);
+ Exception e = expectThrows(IllegalArgumentException.class, () -> processor3.execute(ingestDocument));
+ assertThat(e.getMessage(), equalTo("field [foo] already exists"));
+ }
+
}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java
index ef517d986c..27eeb80670 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java
@@ -56,7 +56,7 @@ public class ScriptProcessorFactoryTests extends ESTestCase {
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> factory.create(null, randomAsciiOfLength(10), configMap));
- assertThat(exception.getMessage(), is("[null] Only one of [file], [id], or [inline] may be configured"));
+ assertThat(exception.getMessage(), is("Only one of [file], [id], or [inline] may be configured"));
}
public void testFactoryValidationAtLeastOneScriptingType() throws Exception {
@@ -66,6 +66,6 @@ public class ScriptProcessorFactoryTests extends ESTestCase {
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> factory.create(null, randomAsciiOfLength(10), configMap));
- assertThat(exception.getMessage(), is("[null] Need [file], [id], or [inline] parameter to refer to scripts"));
+ assertThat(exception.getMessage(), is("Need [file], [id], or [inline] parameter to refer to scripts"));
}
}
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java
index b8e2f07a31..c32b0f101a 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java
@@ -46,11 +46,9 @@ public class ScriptProcessorTests extends ESTestCase {
int randomBytesTotal = randomBytesIn + randomBytesOut;
ScriptService scriptService = mock(ScriptService.class);
- CompiledScript compiledScript = mock(CompiledScript.class);
Script script = new Script("_script");
- when(scriptService.compile(any(), any(), any())).thenReturn(compiledScript);
ExecutableScript executableScript = mock(ExecutableScript.class);
- when(scriptService.executable(any(), any())).thenReturn(executableScript);
+ when(scriptService.executable(any(), any(), any())).thenReturn(executableScript);
Map<String, Object> document = new HashMap<>();
document.put("bytes_in", randomInt());
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java
index 1bd57c79ac..bcc0dde940 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.is;
public class TrimProcessorFactoryTests extends ESTestCase {
@@ -37,6 +38,19 @@ public class TrimProcessorFactoryTests extends ESTestCase {
TrimProcessor uppercaseProcessor = (TrimProcessor)factory.create(null, processorTag, config);
assertThat(uppercaseProcessor.getTag(), equalTo(processorTag));
assertThat(uppercaseProcessor.getField(), equalTo("field1"));
+ assertThat(uppercaseProcessor.isIgnoreMissing(), is(false));
+ }
+
+ public void testCreateWithIgnoreMissing() throws Exception {
+ TrimProcessor.Factory factory = new TrimProcessor.Factory();
+ Map<String, Object> config = new HashMap<>();
+ config.put("field", "field1");
+ config.put("ignore_missing", true);
+ String processorTag = randomAsciiOfLength(10);
+ TrimProcessor trimProcessor = (TrimProcessor)factory.create(null, processorTag, config);
+ assertThat(trimProcessor.getTag(), equalTo(processorTag));
+ assertThat(trimProcessor.getField(), equalTo("field1"));
+ assertThat(trimProcessor.isIgnoreMissing(), is(true));
}
public void testCreateMissingField() throws Exception {
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java
index 4b776a2ee8..ce21cf3e38 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java
@@ -22,8 +22,8 @@ package org.elasticsearch.ingest.common;
public class TrimProcessorTests extends AbstractStringProcessorTestCase {
@Override
- protected AbstractStringProcessor newProcessor(String field) {
- return new TrimProcessor(randomAsciiOfLength(10), field);
+ protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing) {
+ return new TrimProcessor(randomAsciiOfLength(10), field, ignoreMissing);
}
@Override
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java
index ff8dc16b6e..e52c240928 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java
@@ -24,8 +24,8 @@ import java.util.Locale;
public class UppercaseProcessorTests extends AbstractStringProcessorTestCase {
@Override
- protected AbstractStringProcessor newProcessor(String field) {
- return new UppercaseProcessor(randomAsciiOfLength(10), field);
+ protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing) {
+ return new UppercaseProcessor(randomAsciiOfLength(10), field, ignoreMissing);
}
@Override
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml
index 14f58369df..87c1f5a8ab 100644
--- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml
+++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yaml
@@ -13,17 +13,19 @@
- match: { nodes.$master.ingest.processors.1.type: convert }
- match: { nodes.$master.ingest.processors.2.type: date }
- match: { nodes.$master.ingest.processors.3.type: date_index_name }
- - match: { nodes.$master.ingest.processors.4.type: fail }
- - match: { nodes.$master.ingest.processors.5.type: foreach }
- - match: { nodes.$master.ingest.processors.6.type: grok }
- - match: { nodes.$master.ingest.processors.7.type: gsub }
- - match: { nodes.$master.ingest.processors.8.type: join }
- - match: { nodes.$master.ingest.processors.9.type: lowercase }
- - match: { nodes.$master.ingest.processors.10.type: remove }
- - match: { nodes.$master.ingest.processors.11.type: rename }
- - match: { nodes.$master.ingest.processors.12.type: script }
- - match: { nodes.$master.ingest.processors.13.type: set }
- - match: { nodes.$master.ingest.processors.14.type: sort }
- - match: { nodes.$master.ingest.processors.15.type: split }
- - match: { nodes.$master.ingest.processors.16.type: trim }
- - match: { nodes.$master.ingest.processors.17.type: uppercase }
+ - match: { nodes.$master.ingest.processors.4.type: dot_expander }
+ - match: { nodes.$master.ingest.processors.5.type: fail }
+ - match: { nodes.$master.ingest.processors.6.type: foreach }
+ - match: { nodes.$master.ingest.processors.7.type: grok }
+ - match: { nodes.$master.ingest.processors.8.type: gsub }
+ - match: { nodes.$master.ingest.processors.9.type: join }
+ - match: { nodes.$master.ingest.processors.10.type: json }
+ - match: { nodes.$master.ingest.processors.11.type: lowercase }
+ - match: { nodes.$master.ingest.processors.12.type: remove }
+ - match: { nodes.$master.ingest.processors.13.type: rename }
+ - match: { nodes.$master.ingest.processors.14.type: script }
+ - match: { nodes.$master.ingest.processors.15.type: set }
+ - match: { nodes.$master.ingest.processors.16.type: sort }
+ - match: { nodes.$master.ingest.processors.17.type: split }
+ - match: { nodes.$master.ingest.processors.18.type: trim }
+ - match: { nodes.$master.ingest.processors.19.type: uppercase }
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yaml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yaml
new file mode 100644
index 0000000000..1d537ffa6b
--- /dev/null
+++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yaml
@@ -0,0 +1,40 @@
+---
+teardown:
+ - do:
+ ingest.delete_pipeline:
+ id: "1"
+ ignore: 404
+
+---
+"Test escape_dot processor":
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "dot_expander" : {
+ "field" : "foo.bar"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ type: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo.bar: "baz"
+ }
+
+ - do:
+ get:
+ index: test
+ type: test
+ id: 1
+ - match: { _source.foo.bar: "baz" }
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/140_json.yaml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/140_json.yaml
new file mode 100644
index 0000000000..3d9f6a97c0
--- /dev/null
+++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/140_json.yaml
@@ -0,0 +1,40 @@
+---
+teardown:
+ - do:
+ ingest.delete_pipeline:
+ id: "1"
+ ignore: 404
+
+---
+"Test JSON Processor":
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "json" : {
+ "field" : "foo"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ type: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo: "{\"hello\": \"world\"}"
+ }
+
+ - do:
+ get:
+ index: test
+ type: test
+ id: 1
+ - match: { _source.foo.hello: "world" }
diff --git a/modules/lang-expression/licenses/lucene-expressions-6.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.1.0.jar.sha1
deleted file mode 100644
index ac5b533d13..0000000000
--- a/modules/lang-expression/licenses/lucene-expressions-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e722f2e6b4838ede6bf4f1c088fe7b261a7b7571
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1
new file mode 100644
index 0000000000..205aaae6e6
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1
@@ -0,0 +1 @@
+99764b20aba5443f8a181f7015a806443c589844
\ No newline at end of file
diff --git a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java
index 6216ec2354..a762720ff9 100644
--- a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java
+++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java
@@ -23,10 +23,14 @@ import groovy.lang.Binding;
import groovy.lang.GroovyClassLoader;
import groovy.lang.GroovyCodeSource;
import groovy.lang.Script;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Scorer;
import org.codehaus.groovy.ast.ClassCodeExpressionTransformer;
import org.codehaus.groovy.ast.ClassNode;
+import org.codehaus.groovy.ast.Parameter;
import org.codehaus.groovy.ast.expr.ConstantExpression;
import org.codehaus.groovy.ast.expr.Expression;
import org.codehaus.groovy.classgen.GeneratorContext;
@@ -43,7 +47,6 @@ import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.hash.MessageDigests;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ClassPermission;
import org.elasticsearch.script.CompiledScript;
@@ -93,6 +96,9 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
public GroovyScriptEngineService(Settings settings) {
super(settings);
+
+ deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead");
+
// Creates the classloader here in order to isolate Groovy-land code
final SecurityManager sm = System.getSecurityManager();
if (sm != null) {
@@ -179,6 +185,8 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
@Override
public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> vars) {
+ deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead");
+
try {
Map<String, Object> allVars = new HashMap<>();
if (vars != null) {
@@ -192,6 +200,8 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
@Override
public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
+ deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead");
+
return new SearchScript() {
@Override
@@ -248,14 +258,14 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
private final Script script;
private final LeafSearchLookup lookup;
private final Map<String, Object> variables;
- private final ESLogger logger;
+ private final Logger logger;
- public GroovyScript(CompiledScript compiledScript, Script script, ESLogger logger) {
+ public GroovyScript(CompiledScript compiledScript, Script script, Logger logger) {
this(compiledScript, script, null, logger);
}
@SuppressWarnings("unchecked")
- public GroovyScript(CompiledScript compiledScript, Script script, @Nullable LeafSearchLookup lookup, ESLogger logger) {
+ public GroovyScript(CompiledScript compiledScript, Script script, @Nullable LeafSearchLookup lookup, Logger logger) {
this.compiledScript = compiledScript;
this.script = script;
this.lookup = lookup;
@@ -299,13 +309,13 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
// resulting in the uncaughtExceptionHandler handling it.
final StackTraceElement[] elements = ae.getStackTrace();
if (elements.length > 0 && "org.codehaus.groovy.runtime.InvokerHelper".equals(elements[0].getClassName())) {
- logger.trace("failed to run {}", ae, compiledScript);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", compiledScript), ae);
throw new ScriptException("Error evaluating " + compiledScript.name(),
ae, emptyList(), "", compiledScript.lang());
}
throw ae;
} catch (Exception | NoClassDefFoundError e) {
- logger.trace("failed to run {}", e, compiledScript);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", compiledScript), e);
throw new ScriptException("Error evaluating " + compiledScript.name(), e, emptyList(), "", compiledScript.lang());
}
}
diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java
index be307c690f..b0d5fd3366 100644
--- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java
+++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java
@@ -70,12 +70,12 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase {
public void testFieldIndexedScript() throws ExecutionException, InterruptedException {
client().admin().cluster().preparePutStoredScript()
.setId("script1")
- .setScriptLang("groovy")
+ .setScriptLang(GroovyScriptEngineService.NAME)
.setSource(new BytesArray("{ \"script\" : \"2\"}"))
.get();
client().admin().cluster().preparePutStoredScript()
.setId("script2")
- .setScriptLang("groovy")
+ .setScriptLang(GroovyScriptEngineService.NAME)
.setSource(new BytesArray("{ \"script\" : \"factor * 2\"}"))
.get();
@@ -93,8 +93,9 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase {
.prepareSearch()
.setSource(
new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(1)
- .scriptField("test1", new Script("script1", ScriptType.STORED, "groovy", null))
- .scriptField("test2", new Script("script2", ScriptType.STORED, "groovy", script2Params)))
+ .scriptField("test1", new Script("script1", ScriptType.STORED, GroovyScriptEngineService.NAME, null))
+ .scriptField("test2",
+ new Script("script2", ScriptType.STORED, GroovyScriptEngineService.NAME, script2Params)))
.setIndices("test").setTypes("scriptTest").get();
assertHitCount(searchResponse, 5);
assertTrue(searchResponse.getHits().hits().length == 1);
@@ -120,7 +121,8 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase {
.prepareSearch()
.setSource(
new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).scriptField("test_field",
- new Script("script1", ScriptType.STORED, "groovy", null))).setIndices("test_index")
+ new Script("script1", ScriptType.STORED, GroovyScriptEngineService.NAME, null)))
+ .setIndices("test_index")
.setTypes("test_type").get();
assertHitCount(searchResponse, 1);
SearchHit sh = searchResponse.getHits().getAt(0);
@@ -157,7 +159,7 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase {
.prepareSearch("test")
.setSource(
new SearchSourceBuilder().aggregation(AggregationBuilders.terms("test").script(
- new Script("script1", ScriptType.STORED, null, null)))).get();
+ new Script("script1", ScriptType.STORED, GroovyScriptEngineService.NAME, null)))).get();
assertHitCount(searchResponse, 1);
assertThat(searchResponse.getAggregations().get("test"), notNullValue());
}
diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java
index f2eee2bb40..88d9b7be1d 100644
--- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java
+++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java
@@ -68,7 +68,7 @@ public class GroovyScriptTests extends ESIntegTestCase {
}
public void assertScript(String scriptString) {
- Script script = new Script(scriptString, ScriptType.INLINE, "groovy", null);
+ Script script = new Script(scriptString, ScriptType.INLINE, GroovyScriptEngineService.NAME, null);
SearchResponse resp = client().prepareSearch("test")
.setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).sort(SortBuilders.
scriptSort(script, ScriptSortType.NUMBER)))
@@ -99,7 +99,8 @@ public class GroovyScriptTests extends ESIntegTestCase {
try {
client().prepareSearch("test")
- .setQuery(constantScoreQuery(scriptQuery(new Script("null.foo", ScriptType.INLINE, "groovy", null)))).get();
+ .setQuery(constantScoreQuery(scriptQuery(
+ new Script("null.foo", ScriptType.INLINE, GroovyScriptEngineService.NAME, null)))).get();
fail("should have thrown an exception");
} catch (SearchPhaseExecutionException e) {
assertThat(e.toString() + "should not contained NotSerializableTransportException",
@@ -118,8 +119,9 @@ public class GroovyScriptTests extends ESIntegTestCase {
refresh();
// doc[] access
- SearchResponse resp = client().prepareSearch("test").setQuery(functionScoreQuery(scriptFunction(new Script("doc['bar'].value", ScriptType.INLINE, "groovy", null)))
- .boostMode(CombineFunction.REPLACE)).get();
+ SearchResponse resp = client().prepareSearch("test").setQuery(functionScoreQuery(scriptFunction(
+ new Script("doc['bar'].value", ScriptType.INLINE, GroovyScriptEngineService.NAME, null)))
+ .boostMode(CombineFunction.REPLACE)).get();
assertNoFailures(resp);
assertOrderedSearchHits(resp, "3", "2", "1");
@@ -133,7 +135,7 @@ public class GroovyScriptTests extends ESIntegTestCase {
// _score can be accessed
SearchResponse resp = client().prepareSearch("test").setQuery(functionScoreQuery(matchQuery("foo", "dog"),
- scriptFunction(new Script("_score", ScriptType.INLINE, "groovy", null)))
+ scriptFunction(new Script("_score", ScriptType.INLINE, GroovyScriptEngineService.NAME, null)))
.boostMode(CombineFunction.REPLACE)).get();
assertNoFailures(resp);
assertSearchHits(resp, "3", "1");
@@ -144,9 +146,9 @@ public class GroovyScriptTests extends ESIntegTestCase {
resp = client()
.prepareSearch("test")
.setQuery(
- functionScoreQuery(matchQuery("foo", "dog"),
- scriptFunction(new Script("_score > 0.0 ? _score : 0", ScriptType.INLINE, "groovy", null))).boostMode(
- CombineFunction.REPLACE)).get();
+ functionScoreQuery(matchQuery("foo", "dog"), scriptFunction(
+ new Script("_score > 0.0 ? _score : 0", ScriptType.INLINE, GroovyScriptEngineService.NAME, null)))
+ .boostMode(CombineFunction.REPLACE)).get();
assertNoFailures(resp);
assertSearchHits(resp, "3", "1");
}
diff --git a/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/25_script_upsert.yaml b/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/25_script_upsert.yaml
index 3ab70d084f..6ba4d39e71 100644
--- a/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/25_script_upsert.yaml
+++ b/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/25_script_upsert.yaml
@@ -13,6 +13,7 @@
script:
inline: "ctx._source.foo = bar"
params: { bar: 'xxx' }
+ lang: "groovy"
upsert: { foo: baz }
- do:
@@ -33,6 +34,7 @@
script:
inline: "ctx._source.foo = bar"
params: { bar: 'xxx' }
+ lang: "groovy"
upsert: { foo: baz }
- do:
@@ -52,6 +54,7 @@
script:
inline: "ctx._source.foo = bar"
params: { bar: 'xxx' }
+ lang: "groovy"
upsert: { foo: baz }
scripted_upsert: true
diff --git a/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/90_missing.yaml b/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/90_missing.yaml
index c49565a630..999d9f610f 100644
--- a/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/90_missing.yaml
+++ b/modules/lang-groovy/src/test/resources/rest-api-spec/test/lang_groovy/90_missing.yaml
@@ -34,6 +34,7 @@
script:
inline: "ctx._source.foo = bar"
params: { bar: 'xxx' }
+ lang: "groovy"
- do:
update:
diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle
index 011f949c86..8fed78aca3 100644
--- a/modules/lang-mustache/build.gradle
+++ b/modules/lang-mustache/build.gradle
@@ -24,7 +24,7 @@ esplugin {
}
dependencies {
- compile "com.github.spullara.mustache.java:compiler:0.9.1"
+ compile "com.github.spullara.mustache.java:compiler:0.9.3"
}
integTest {
diff --git a/modules/lang-mustache/licenses/compiler-0.9.1.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.1.jar.sha1
deleted file mode 100644
index 96152e075b..0000000000
--- a/modules/lang-mustache/licenses/compiler-0.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-14aec5344639782ee76441401b773946c65eb2b3
diff --git a/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1
new file mode 100644
index 0000000000..2b0fbbc542
--- /dev/null
+++ b/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1
@@ -0,0 +1 @@
+2815e016c63bec4f18704ea4f5489106a5b01a99
\ No newline at end of file
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java
index 66ecf23fa0..b7d7087373 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java
@@ -20,6 +20,8 @@ package org.elasticsearch.script.mustache;
import com.github.mustachejava.Mustache;
import com.github.mustachejava.MustacheFactory;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
@@ -165,7 +167,7 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme
return null;
});
} catch (Exception e) {
- logger.error("Error running {}", e, template);
+ logger.error((Supplier<?>) () -> new ParameterizedMessage("Error running {}", template), e);
throw new GeneralScriptException("Error running " + template, e);
}
return result.bytes();
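
The replacement line above follows the log4j2 convention: the throwable moves to the last argument and the message is wrapped in a Supplier so it is only built when ERROR logging is actually enabled. A minimal sketch of the idiom (method and parameter names are illustrative assumptions):

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    // Sketch: the lambda defers ParameterizedMessage construction; the cast
    // disambiguates the Supplier overload from the plain Object-message overloads.
    static void logTemplateFailure(Logger logger, Object template, Exception e) {
        logger.error((Supplier<?>) () -> new ParameterizedMessage("Error running {}", template), e);
    }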
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java
index 18637b15e8..6d830a21f5 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java
@@ -68,6 +68,8 @@ public class RestSearchTemplateAction extends BaseRestHandler {
request.setScriptType(ScriptService.ScriptType.STORED);
request.setScript(s);
}, new ParseField("id"));
+ PARSER.declareBoolean(SearchTemplateRequest::setExplain, new ParseField("explain"));
+ PARSER.declareBoolean(SearchTemplateRequest::setProfile, new ParseField("profile"));
PARSER.declareField((parser, request, value) -> {
request.setScriptType(ScriptService.ScriptType.INLINE);
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java
index a4a6533604..1fa7f24de8 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java
@@ -41,6 +41,8 @@ public class SearchTemplateRequest extends ActionRequest<SearchTemplateRequest>
private SearchRequest request;
private boolean simulate = false;
+ private boolean explain = false;
+ private boolean profile = false;
private ScriptService.ScriptType scriptType;
private String script;
private Map<String, Object> scriptParams;
@@ -69,6 +71,22 @@ public class SearchTemplateRequest extends ActionRequest<SearchTemplateRequest>
this.simulate = simulate;
}
+ public boolean isExplain() {
+ return explain;
+ }
+
+ public void setExplain(boolean explain) {
+ this.explain = explain;
+ }
+
+ public boolean isProfile() {
+ return profile;
+ }
+
+ public void setProfile(boolean profile) {
+ this.profile = profile;
+ }
+
public ScriptService.ScriptType getScriptType() {
return scriptType;
}
@@ -123,6 +141,8 @@ public class SearchTemplateRequest extends ActionRequest<SearchTemplateRequest>
super.readFrom(in);
request = in.readOptionalStreamable(SearchRequest::new);
simulate = in.readBoolean();
+ explain = in.readBoolean();
+ profile = in.readBoolean();
scriptType = ScriptService.ScriptType.readFrom(in);
script = in.readOptionalString();
if (in.readBoolean()) {
@@ -135,6 +155,8 @@ public class SearchTemplateRequest extends ActionRequest<SearchTemplateRequest>
super.writeTo(out);
out.writeOptionalStreamable(request);
out.writeBoolean(simulate);
+ out.writeBoolean(explain);
+ out.writeBoolean(profile);
ScriptService.ScriptType.writeTo(scriptType, out);
out.writeOptionalString(script);
boolean hasParams = scriptParams != null;
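
Note that readFrom and writeTo above serialize the two new booleans at the same position, keeping the wire format symmetric. A quick round-trip sketch, assuming a no-arg constructor and BytesStreamOutput plumbing (both assumptions; neither appears in this diff):

    import org.elasticsearch.common.io.stream.BytesStreamOutput;

    // Sketch: explain/profile must be read back in exactly the order they were written.
    static SearchTemplateRequest roundTrip(SearchTemplateRequest original) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out);
        SearchTemplateRequest copy = new SearchTemplateRequest();
        copy.readFrom(out.bytes().streamInput());
        return copy; // copy.isExplain() / copy.isProfile() should match the original
    }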
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java
index 811c2523e0..52f51b7254 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java
@@ -47,6 +47,22 @@ public class SearchTemplateRequestBuilder
return this;
}
+ /**
+ * Enables explanation for each hit on how its score was computed. Disabled by default.
+ */
+ public SearchTemplateRequestBuilder setExplain(boolean explain) {
+ request.setExplain(explain);
+ return this;
+ }
+
+ /**
+ * Enables profiling of the query. Disabled by default.
+ */
+ public SearchTemplateRequestBuilder setProfile(boolean profile) {
+ request.setProfile(profile);
+ return this;
+ }
+
public SearchTemplateRequestBuilder setScriptType(ScriptService.ScriptType scriptType) {
request.setScriptType(scriptType);
return this;
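
With the two builder methods above, callers can request diagnostics fluently. A usage sketch (how the builder is obtained from the client is elided and assumed; "template_1" is the file-based template used by the REST tests later in this diff):

    import org.elasticsearch.script.ScriptService;

    // Sketch: enables the new diagnostic flags on an existing builder.
    static SearchTemplateRequestBuilder withDiagnostics(SearchTemplateRequestBuilder builder) {
        return builder
            .setScriptType(ScriptService.ScriptType.FILE)
            .setScript("template_1")
            .setExplain(true)   // per-hit _explanation in the response
            .setProfile(true);  // profile section in the response
    }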
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
index 473e287c9a..f323fcf450 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
@@ -32,14 +32,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchRequestParsers;
-import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -86,7 +83,9 @@ public class TransportSearchTemplateAction extends HandledTransportAction<Search
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
SearchSourceBuilder builder = SearchSourceBuilder.searchSource();
builder.parseXContent(new QueryParseContext(searchRequestParsers.queryParsers, parser, parseFieldMatcher),
- searchRequestParsers.aggParsers, searchRequestParsers.suggesters);
+ searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers);
+ builder.explain(request.isExplain());
+ builder.profile(request.isProfile());
searchRequest.source(builder);
searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yaml
index c7e4c91b4c..b09976885d 100644
--- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yaml
+++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/30_search_template.yaml
@@ -106,3 +106,19 @@
- match: { hits.total: 1 }
- length: { hits.hits: 1 }
+
+ - do:
+ search_template:
+ body: { "file" : "template_1", "params": { "size": "2", "field": "otherField", "value": "foo" }, "explain" : true }
+
+ - match: { hits.total: 1 }
+ - length: { hits.hits: 1 }
+ - match: { hits.hits.0._explanation.description: "weight(otherField:foo in 0) [PerFieldSimilarity], result of:" }
+
+ - do:
+ search_template:
+ body: { "file" : "template_1", "params": { "size": "2", "field": "otherField", "value": "foo" }, "profile" : true }
+
+ - match: { hits.total: 1 }
+ - length: { hits.hits: 1 }
+ - length: { profile: 1 }
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java
index f0e1bde74d..9ef1b2ccf1 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java
@@ -19,10 +19,18 @@
package org.elasticsearch.painless;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
+
/**
* Settings to use when compiling a script.
*/
public final class CompilerSettings {
+ /**
+ * Are regexes enabled? This is a node level setting because regexes break out of painless's lovely sandbox and can cause stack
+ * overflows, and we can't analyze a regex up front to be sure it won't.
+ */
+ public static final Setting<Boolean> REGEX_ENABLED = Setting.boolSetting("script.painless.regex.enabled", false, Property.NodeScope);
/**
* Constant to be used when specifying the maximum loop counter when compiling a script.
@@ -56,6 +64,12 @@ public final class CompilerSettings {
private int initialCallSiteDepth = 0;
/**
+ * Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
+ * <strong>looking</strong> regexes can cause stack overflows.
+ */
+ private boolean regexesEnabled = false;
+
+ /**
* Returns the value for the cumulative total number of statements that can be made in all loops
* in a script before an exception is thrown. This attempts to prevent infinite loops. Note if
* the counter is set to 0, no loop counter will be written.
@@ -104,4 +118,20 @@ public final class CompilerSettings {
public void setInitialCallSiteDepth(int depth) {
this.initialCallSiteDepth = depth;
}
+
+ /**
+ * Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
+ * <strong>looking</strong> regexes can cause stack overflows.
+ */
+ public boolean areRegexesEnabled() {
+ return regexesEnabled;
+ }
+
+ /**
+ * Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
+ * <strong>looking</strong> regexes can cause stack overflows.
+ */
+ public void setRegexesEnabled(boolean regexesEnabled) {
+ this.regexesEnabled = regexesEnabled;
+ }
}
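
Because REGEX_ENABLED is declared with Property.NodeScope, it can only be bound when the node's Settings are built (for example from elasticsearch.yaml), never per script. A sketch of enabling it programmatically, mirroring what RegexTests does later in this diff:

    import org.elasticsearch.common.settings.Settings;

    // Sketch: builds node settings with painless regex support switched on.
    static Settings regexEnabledSettings() {
        return Settings.builder()
            .put(CompilerSettings.REGEX_ENABLED.getKey(), true) // "script.painless.regex.enabled"
            .build();
    }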
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java
index 954314286b..c00dc64310 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java
@@ -20,19 +20,21 @@
package org.elasticsearch.painless;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin;
-import org.elasticsearch.script.ScriptEngineRegistry;
import org.elasticsearch.script.ScriptEngineService;
-import org.elasticsearch.script.ScriptModule;
+
+import java.util.Arrays;
+import java.util.List;
/**
* Registers Painless as a plugin.
*/
public final class PainlessPlugin extends Plugin implements ScriptPlugin {
- // force to pare our definition at startup (not on the user's first script)
+ // force to parse our definition at startup (not on the user's first script)
static {
Definition.VOID_TYPE.hashCode();
}
@@ -41,4 +43,9 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin {
public ScriptEngineService getScriptEngineService(Settings settings) {
return new PainlessScriptEngineService(settings);
}
+
+ @Override
+ public List<Setting<?>> getSettings() {
+ return Arrays.asList(CompilerSettings.REGEX_ENABLED);
+ }
}
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java
index 834593aeb9..cc16534399 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java
@@ -54,11 +54,6 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
public static final String NAME = "painless";
/**
- * Default compiler settings to be used.
- */
- private static final CompilerSettings DEFAULT_COMPILER_SETTINGS = new CompilerSettings();
-
- /**
* Permissions context used during compilation.
*/
private static final AccessControlContext COMPILATION_CONTEXT;
@@ -75,11 +70,18 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
}
/**
+ * Default compiler settings to be used. Note that {@link CompilerSettings} is mutable but this instance shouldn't be mutated outside
+ * of {@link PainlessScriptEngineService#PainlessScriptEngineService(Settings)}.
+ */
+ private final CompilerSettings defaultCompilerSettings = new CompilerSettings();
+
+ /**
* Constructor.
* @param settings The settings to initialize the engine with.
*/
public PainlessScriptEngineService(final Settings settings) {
super(settings);
+ defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings));
}
/**
@@ -111,29 +113,36 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
if (params.isEmpty()) {
// Use the default settings.
- compilerSettings = DEFAULT_COMPILER_SETTINGS;
+ compilerSettings = defaultCompilerSettings;
} else {
// Use custom settings specified by params.
compilerSettings = new CompilerSettings();
+
+ // Except for the regex-enabled flag - it is a node level setting and can't be changed in the request.
+ compilerSettings.setRegexesEnabled(defaultCompilerSettings.areRegexesEnabled());
+
Map<String, String> copy = new HashMap<>(params);
- String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER);
+ String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER);
if (value != null) {
compilerSettings.setMaxLoopCounter(Integer.parseInt(value));
}
value = copy.remove(CompilerSettings.PICKY);
-
if (value != null) {
compilerSettings.setPicky(Boolean.parseBoolean(value));
}
value = copy.remove(CompilerSettings.INITIAL_CALL_SITE_DEPTH);
-
if (value != null) {
compilerSettings.setInitialCallSiteDepth(Integer.parseInt(value));
}
+ value = copy.remove(CompilerSettings.REGEX_ENABLED.getKey());
+ if (value != null) {
+ throw new IllegalArgumentException("[painless.regex.enabled] can only be set on node startup.");
+ }
+
if (!copy.isEmpty()) {
throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + copy);
}
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java
index 61269419fd..da430f4280 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java
@@ -796,6 +796,11 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
@Override
public ANode visitRegex(RegexContext ctx) {
+ if (false == settings.areRegexesEnabled()) {
+ throw location(ctx).createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] "
+ + "in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep "
+ + "recursion and long loops."));
+ }
String text = ctx.REGEX().getText();
int lastSlash = text.lastIndexOf('/');
String pattern = text.substring(1, lastSlash);
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java
index 50b56505eb..a018ec4822 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java
@@ -262,8 +262,8 @@ public final class EAssignment extends AExpression {
rhs.write(writer, globals); // write the bytecode for the rhs
- if (!(rhs instanceof EBinary) || ((EBinary)rhs).cat) {
- writer.writeAppendStrings(rhs.actual); // append the rhs's value unless it's also a concatenation
+ if (!(rhs instanceof EBinary) || !((EBinary)rhs).cat) { // check to see if the rhs has already done a concatenation
+ writer.writeAppendStrings(rhs.actual); // append the rhs's value since it hasn't been appended already
}
writer.writeToStrings(); // put the value for string concat onto the stack
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java
index 9d462b1033..eb24ebb5e3 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java
@@ -183,6 +183,8 @@ public final class SFor extends AStatement {
writer.writeLoopCounter(loopCounter.getSlot(), statementCount, location);
}
+ block.continu = begin;
+ block.brake = end;
block.write(writer, globals);
} else {
if (loopCounter != null) {
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java
index d425d59f21..2841aff46b 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java
@@ -100,6 +100,8 @@ final class SSubEachArray extends AStatement {
writer.writeLoopCounter(loopCounter.getSlot(), statementCount, location);
}
+ block.continu = begin;
+ block.brake = end;
block.write(writer, globals);
writer.goTo(begin);
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
index ce4507ed98..57acb07aa6 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
@@ -122,6 +122,8 @@ final class SSubEachIterable extends AStatement {
writer.writeLoopCounter(loopCounter.getSlot(), statementCount, location);
}
+ block.continu = begin;
+ block.brake = end;
block.write(writer, globals);
writer.goTo(begin);
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java
index a4f85f393b..fc2fffb644 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java
@@ -1,5 +1,6 @@
package org.elasticsearch.painless;
+import java.util.ArrayList;
import java.util.Collections;
/*
@@ -22,6 +23,7 @@ import java.util.Collections;
*/
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
public class BasicStatementTests extends ScriptTestCase {
@@ -272,4 +274,198 @@ public class BasicStatementTests extends ScriptTestCase {
null, true
));
}
+
+ // tests both single break and multiple breaks used in a script
+ public void testForWithBreak() {
+ // single break test
+ assertEquals(1, exec(
+ "Map settings = ['test1' : '1'];" +
+ "int i = 0;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (; i < keys.size(); ++i) {" +
+ " if (settings.containsKey(keys[i])) {" +
+ " break;" +
+ " }" +
+ "}" +
+ "return i;"
+ ));
+
+ List<Integer> expected = new ArrayList<>();
+ expected.add(1);
+ expected.add(0);
+
+ // multiple breaks test
+ assertEquals(expected, exec(
+ "Map outer = ['test1' : '1'];" +
+ "Map inner = ['test0' : '2'];" +
+ "boolean found = false;" +
+ "int i = 0, j = 0;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (; i < keys.size(); ++i) {" +
+ " if (outer.containsKey(keys[i])) {" +
+ " for (; j < keys.size(); ++j) {" +
+ " if (inner.containsKey(keys[j])) {" +
+ " found = true;" +
+ " break;" +
+ " }" +
+ " }" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " }" +
+ "}" +
+ "[i, j];"
+ ));
+
+ expected.set(1, 3);
+
+ // multiple breaks test, ignore inner break
+ assertEquals(expected, exec(
+ "Map outer = ['test1' : '1'];" +
+ "Map inner = ['test3' : '2'];" +
+ "int i = 0, j = 0;" +
+ "boolean found = false;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (; i < keys.size(); ++i) {" +
+ " if (outer.containsKey(keys[i])) {" +
+ " for (; j < keys.size(); ++j) {" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " }" +
+ " found = true;" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " }" +
+ "}" +
+ "[i, j];"
+ ));
+
+ expected.set(0, 3);
+ expected.set(1, 1);
+
+ // multiple breaks test, ignore outer break
+ assertEquals(expected, exec(
+ "Map outer = ['test3' : '1'];" +
+ "Map inner = ['test1' : '2'];" +
+ "int i = 0, j = 0;" +
+ "boolean found = false;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (; i < keys.size(); ++i) {" +
+ " if (outer.containsKey('test3')) {" +
+ " for (; j < keys.size(); ++j) {" +
+ " if (inner.containsKey(keys[j])) {" +
+ " break;" +
+ " }" +
+ " }" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " }" +
+ "}" +
+ "[i, j];"
+ ));
+ }
+
+ // tests both single break and multiple breaks used in a script
+ public void testForEachWithBreak() {
+ // single break test
+ assertEquals(1, exec(
+ "Map settings = ['test1' : '1'];" +
+ "int i = 0;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (String key : keys) {" +
+ " if (settings.containsKey(key)) {" +
+ " break;" +
+ " }" +
+ " ++i;" +
+ "}" +
+ "return i;"
+ ));
+
+ List<Integer> expected = new ArrayList<>();
+ expected.add(1);
+ expected.add(0);
+
+ // multiple breaks test
+ assertEquals(expected, exec(
+ "Map outer = ['test1' : '1'];" +
+ "Map inner = ['test0' : '2'];" +
+ "int i = 0, j = 0;" +
+ "boolean found = false;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (String okey : keys) {" +
+ " if (outer.containsKey(okey)) {" +
+ " for (String ikey : keys) {" +
+ " if (inner.containsKey(ikey)) {" +
+ " found = true;" +
+ " break;" +
+ " }" +
+ " ++j;" +
+ " }" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " }" +
+ " ++i;" +
+ "}" +
+ "[i, j];"
+ ));
+
+ expected.set(0, 3);
+ expected.set(1, 1);
+
+ // multiple breaks test, ignore outer break
+ assertEquals(expected, exec(
+ "Map outer = ['test1' : '1'];" +
+ "Map inner = ['test1' : '1'];" +
+ "int i = 0, j = 0;" +
+ "boolean found = false;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (String okey : keys) {" +
+ " if (outer.containsKey(okey)) {" +
+ " for (String ikey : keys) {" +
+ " if (inner.containsKey(ikey)) {" +
+ " break;" +
+ " }" +
+ " ++j;" +
+ " }" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " }" +
+ " ++i;" +
+ "}" +
+ "[i, j];"
+ ));
+
+ expected.set(0, 1);
+ expected.set(1, 3);
+
+ // multiple breaks test, ignore inner break
+ assertEquals(expected, exec(
+ "Map outer = ['test1' : '1'];" +
+ "Map inner = ['test1' : '1'];" +
+ "int i = 0, j = 0;" +
+ "boolean found = false;" +
+ "List keys = ['test0', 'test1', 'test2'];" +
+ "for (String okey : keys) {" +
+ " if (outer.containsKey(okey)) {" +
+ " for (String ikey : keys) {" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " ++j;" +
+ " }" +
+ " found = true;" +
+ " if (found) {" +
+ " break;" +
+ " }" +
+ " }" +
+ " ++i;" +
+ "}" +
+ "[i, j];"
+ ));
+ }
}
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
index dbbb9958d7..1c53692ad7 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
@@ -19,17 +19,26 @@
package org.elasticsearch.painless;
+import org.elasticsearch.common.settings.Settings;
+
import java.nio.CharBuffer;
import java.util.Arrays;
import java.util.HashSet;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
-import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.containsString;
public class RegexTests extends ScriptTestCase {
+ @Override
+ protected Settings scriptEngineSettings() {
+ // Enable regexes just for this test. They are disabled by default.
+ return Settings.builder()
+ .put(CompilerSettings.REGEX_ENABLED.getKey(), true)
+ .build();
+ }
+
public void testPatternAfterReturn() {
assertEquals(true, exec("return 'foo' ==~ /foo/"));
assertEquals(false, exec("return 'bar' ==~ /foo/"));
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java
index 63c929a69a..672204cbc2 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java
@@ -45,7 +45,14 @@ public abstract class ScriptTestCase extends ESTestCase {
@Before
public void setup() {
- scriptEngine = new PainlessScriptEngineService(Settings.EMPTY);
+ scriptEngine = new PainlessScriptEngineService(scriptEngineSettings());
+ }
+
+ /**
+ * Settings used to build the script engine. Override to customize settings like {@link RegexTests} does to enable regexes.
+ */
+ protected Settings scriptEngineSettings() {
+ return Settings.EMPTY;
}
/** Compiles and returns the result of {@code script} */
@@ -71,6 +78,7 @@ public abstract class ScriptTestCase extends ESTestCase {
if (picky) {
CompilerSettings pickySettings = new CompilerSettings();
pickySettings.setPicky(true);
+ pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings()));
Walker.buildPainlessTree(getTestName(), script, pickySettings, null);
}
// test actual script execution
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java
index 873e773b9a..b5b3e2cfbf 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java
@@ -21,7 +21,9 @@ package org.elasticsearch.painless;
import static org.elasticsearch.painless.WriterConstants.MAX_INDY_STRING_CONCAT_ARGS;
+import java.util.HashMap;
import java.util.Locale;
+import java.util.Map;
public class StringTests extends ScriptTestCase {
@@ -182,7 +184,7 @@ public class StringTests extends ScriptTestCase {
});
assertTrue(expected.getMessage().contains("Cannot cast [String] with length greater than one to [char]."));
}
-
+
public void testDefConcat() {
assertEquals("a" + (byte)2, exec("def x = 'a'; def y = (byte)2; return x + y"));
assertEquals("a" + (short)2, exec("def x = 'a'; def y = (short)2; return x + y"));
@@ -205,7 +207,7 @@ public class StringTests extends ScriptTestCase {
exec("def x = null; def y = null; return x + y");
});
}
-
+
public void testDefCompoundAssignment() {
assertEquals("a" + (byte)2, exec("def x = 'a'; x += (byte)2; return x"));
assertEquals("a" + (short)2, exec("def x = 'a'; x += (short)2; return x"));
@@ -222,6 +224,17 @@ public class StringTests extends ScriptTestCase {
});
}
+ public void testComplexCompoundAssignment() {
+ Map<String, Object> params = new HashMap<>();
+ Map<String, Object> ctx = new HashMap<>();
+ ctx.put("_id", "somerandomid");
+ params.put("ctx", ctx);
+
+ assertEquals("somerandomid.somerandomid", exec("ctx._id += '.' + ctx._id", params, false));
+ assertEquals("somerandomid.somerandomid", exec("String x = 'somerandomid'; x += '.' + x"));
+ assertEquals("somerandomid.somerandomid", exec("def x = 'somerandomid'; x += '.' + x"));
+ }
+
public void testAppendStringIntoMap() {
assertEquals("nullcat", exec("def a = new HashMap(); a.cat += 'cat'"));
}
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java
index 1d60eb9c29..7e4311f24e 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java
@@ -20,14 +20,13 @@
package org.elasticsearch.painless;
import org.apache.lucene.util.Constants;
-import org.elasticsearch.script.ScriptException;
import java.lang.invoke.WrongMethodTypeException;
import java.util.Arrays;
import java.util.Collections;
import static java.util.Collections.emptyMap;
-import static org.hamcrest.Matchers.containsString;
+import static java.util.Collections.singletonMap;
public class WhenThingsGoWrongTests extends ScriptTestCase {
public void testNullPointer() {
@@ -234,4 +233,16 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
exec("void recurse(int x, int y) {recurse(x, y)} recurse(1, 2);");
});
}
+
+ public void testRegexDisabledByDefault() {
+ IllegalStateException e = expectThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/"));
+ assertEquals("Regexes are disabled. Set [script.painless.regex.enabled] to [true] in elasticsearch.yaml to allow them. "
+ + "Be careful though, regexes break out of Painless's protection against deep recursion and long loops.", e.getMessage());
+ }
+
+ public void testCanNotOverrideRegexEnabled() {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> exec("", null, singletonMap(CompilerSettings.REGEX_ENABLED.getKey(), "true"), null, false));
+ assertEquals("[painless.regex.enabled] can only be set on node startup.", e.getMessage());
+ }
}
diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml
new file mode 100644
index 0000000000..bcf02f657b
--- /dev/null
+++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml
@@ -0,0 +1,33 @@
+---
+"Regex in update fails":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ foo: bar
+ count: 1
+
+ - do:
+ catch: /Regexes are disabled. Set \[script.painless.regex.enabled\] to \[true\] in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep recursion and long loops./
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ script:
+ lang: painless
+ inline: "ctx._source.foo = params.bar ==~ /cat/"
+ params: { bar: 'xxx' }
+
+---
+"Regex enabled is not a dynamic setting":
+
+ - do:
+ catch: /setting \[script.painless.regex.enabled\], not dynamically updateable/
+ cluster.put_settings:
+ body:
+ transient:
+ script.painless.regex.enabled: true
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java
index 3d5965b358..74ce3a5be1 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java
@@ -390,7 +390,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
if (analyzer != null) {
return analyzer;
} else {
- return context.getAnalysisService().defaultIndexAnalyzer();
+ return context.getIndexAnalyzers().getDefaultIndexAnalyzer();
}
}
};
@@ -515,7 +515,8 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
currentFieldName = sourceParser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
- return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
+ QueryParseContext queryParseContext = context.newParseContextWithLegacyScriptLanguage(sourceParser);
+ return parseQuery(context, mapUnmappedFieldsAsString, queryParseContext, sourceParser);
} else {
sourceParser.skipChildren();
}
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java
index 4509ab3cb5..98aaa89164 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java
@@ -242,8 +242,8 @@ public class PercolateRequest extends ActionRequest<PercolateRequest> implements
if (source == null && getRequest == null) {
validationException = addValidationError("source or get is missing", validationException);
}
- if (getRequest != null && getRequest.fields() != null) {
- validationException = addValidationError("get fields option isn't supported via percolate request", validationException);
+ if (getRequest != null && getRequest.storedFields() != null) {
+ validationException = addValidationError("get stored fields option isn't supported via percolate request", validationException);
}
return validationException;
}
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
index 3263377516..a502e92402 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
@@ -53,6 +53,8 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.BoostingQueryBuilder;
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
+import org.elasticsearch.index.query.HasChildQueryBuilder;
+import org.elasticsearch.index.query.HasParentQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
@@ -268,8 +270,10 @@ public class PercolatorFieldMapper extends FieldMapper {
}
XContentParser parser = context.parser();
- QueryBuilder queryBuilder = parseQueryBuilder(queryShardContext.newParseContext(parser), parser.getTokenLocation());
- verifyRangeQueries(queryBuilder);
+ QueryBuilder queryBuilder = parseQueryBuilder(
+ queryShardContext.newParseContext(parser), parser.getTokenLocation()
+ );
+ verifyQuery(queryBuilder);
// Fetching of terms, shapes and indexed scripts happen during this rewrite:
queryBuilder = queryBuilder.rewrite(queryShardContext);
@@ -310,7 +314,12 @@ public class PercolatorFieldMapper extends FieldMapper {
}
public static Query parseQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, XContentParser parser) throws IOException {
- return toQuery(context, mapUnmappedFieldsAsString, parseQueryBuilder(context.newParseContext(parser), parser.getTokenLocation()));
+ return parseQuery(context, mapUnmappedFieldsAsString, context.newParseContext(parser), parser);
+ }
+
+ public static Query parseQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, QueryParseContext queryParseContext,
+ XContentParser parser) throws IOException {
+ return toQuery(context, mapUnmappedFieldsAsString, parseQueryBuilder(queryParseContext, parser.getTokenLocation()));
}
static Query toQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, QueryBuilder queryBuilder) throws IOException {
@@ -356,19 +365,26 @@ public class PercolatorFieldMapper extends FieldMapper {
}
/**
- * Fails if a range query with a date range is found based on current time
+ * Fails if a percolator contains an unsupported query. The following queries are not supported:
+ * 1) a range query with a date range based on current time
+ * 2) a has_child query
+ * 3) a has_parent query
*/
- static void verifyRangeQueries(QueryBuilder queryBuilder) {
+ static void verifyQuery(QueryBuilder queryBuilder) {
if (queryBuilder instanceof RangeQueryBuilder) {
RangeQueryBuilder rangeQueryBuilder = (RangeQueryBuilder) queryBuilder;
if (rangeQueryBuilder.from() instanceof String) {
String from = (String) rangeQueryBuilder.from();
String to = (String) rangeQueryBuilder.to();
if (from.contains("now") || to.contains("now")) {
- throw new IllegalArgumentException("Percolator queries containing time range queries based on the " +
- "current time are forbidden");
+ throw new IllegalArgumentException("percolator queries containing time range queries based on the " +
+ "current time is unsupported");
}
}
+ } else if (queryBuilder instanceof HasChildQueryBuilder) {
+ throw new IllegalArgumentException("the [has_child] query is unsupported inside a percolator query");
+ } else if (queryBuilder instanceof HasParentQueryBuilder) {
+ throw new IllegalArgumentException("the [has_parent] query is unsupported inside a percolator query");
} else if (queryBuilder instanceof BoolQueryBuilder) {
BoolQueryBuilder boolQueryBuilder = (BoolQueryBuilder) queryBuilder;
List<QueryBuilder> clauses = new ArrayList<>();
@@ -377,15 +393,15 @@ public class PercolatorFieldMapper extends FieldMapper {
clauses.addAll(boolQueryBuilder.mustNot());
clauses.addAll(boolQueryBuilder.should());
for (QueryBuilder clause : clauses) {
- verifyRangeQueries(clause);
+ verifyQuery(clause);
}
} else if (queryBuilder instanceof ConstantScoreQueryBuilder) {
- verifyRangeQueries(((ConstantScoreQueryBuilder) queryBuilder).innerQuery());
+ verifyQuery(((ConstantScoreQueryBuilder) queryBuilder).innerQuery());
} else if (queryBuilder instanceof FunctionScoreQueryBuilder) {
- verifyRangeQueries(((FunctionScoreQueryBuilder) queryBuilder).query());
+ verifyQuery(((FunctionScoreQueryBuilder) queryBuilder).query());
} else if (queryBuilder instanceof BoostingQueryBuilder) {
- verifyRangeQueries(((BoostingQueryBuilder) queryBuilder).negativeQuery());
- verifyRangeQueries(((BoostingQueryBuilder) queryBuilder).positiveQuery());
+ verifyQuery(((BoostingQueryBuilder) queryBuilder).negativeQuery());
+ verifyQuery(((BoostingQueryBuilder) queryBuilder).positiveQuery());
}
}
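
The recursion above means the check also catches unsupported queries nested inside bool, constant_score, function_score, and boosting clauses. A sketch of the failure mode, mirroring the assertions in PercolatorFieldMapperTests below (assumes same-package access, since verifyQuery is package-private):

    import org.apache.lucene.search.join.ScoreMode;
    import org.elasticsearch.index.query.BoolQueryBuilder;
    import org.elasticsearch.index.query.HasChildQueryBuilder;
    import org.elasticsearch.index.query.MatchAllQueryBuilder;

    // Sketch: a percolator query containing has_child now fails fast,
    // even when the clause is wrapped in a bool query.
    static void rejectHasChild() {
        HasChildQueryBuilder hasChild =
            new HasChildQueryBuilder("_type", new MatchAllQueryBuilder(), ScoreMode.None);
        try {
            PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(hasChild));
        } catch (IllegalArgumentException expected) {
            // "the [has_child] query is unsupported inside a percolator query"
        }
    }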
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java
index 8154c63290..c30a988a4f 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java
@@ -48,6 +48,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java
index 44914e140b..8b290d4c9c 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java
@@ -37,9 +37,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.search.SearchRequestParsers;
-import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -160,8 +158,7 @@ public class TransportMultiPercolateAction extends HandledTransportAction<MultiP
try {
SearchRequest searchRequest = TransportPercolateAction.createSearchRequest(
percolateRequest, docSource, searchRequestParsers.queryParsers,
- searchRequestParsers.aggParsers, parseFieldMatcher
- );
+ searchRequestParsers.aggParsers, searchRequestParsers.searchExtParsers, parseFieldMatcher);
multiSearchRequest.add(searchRequest);
} catch (Exception e) {
preFailures.put(i, new MultiPercolateResponse.Item(e));
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java
index 55e7d922c3..2322c981a7 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java
@@ -45,6 +45,7 @@ import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.search.SearchExtRegistry;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.SearchRequestParsers;
@@ -69,7 +70,8 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
public TransportPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Client client, SearchRequestParsers searchRequestParsers) {
- super(settings, PercolateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PercolateRequest::new);
+ super(settings, PercolateAction.NAME, threadPool, transportService, actionFilters,
+ indexNameExpressionResolver, PercolateRequest::new);
this.client = client;
this.searchRequestParsers = searchRequestParsers;
this.parseFieldMatcher = new ParseFieldMatcher(settings);
@@ -84,7 +86,8 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
if (getResponse.isExists()) {
innerDoExecute(request, getResponse.getSourceAsBytesRef(), listener);
} else {
- onFailure(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", request.getRequest().index(), request.getRequest().type(), request.getRequest().id()));
+ onFailure(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist",
+ request.getRequest().index(), request.getRequest().type(), request.getRequest().id()));
}
}
@@ -102,7 +105,7 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
SearchRequest searchRequest;
try {
searchRequest = createSearchRequest(request, docSource, searchRequestParsers.queryParsers,
- searchRequestParsers.aggParsers, parseFieldMatcher);
+ searchRequestParsers.aggParsers, searchRequestParsers.searchExtParsers, parseFieldMatcher);
} catch (IOException e) {
listener.onFailure(e);
return;
@@ -124,7 +127,10 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
});
}
- public static SearchRequest createSearchRequest(PercolateRequest percolateRequest, BytesReference documentSource, IndicesQueriesRegistry queryRegistry, AggregatorParsers aggParsers, ParseFieldMatcher parseFieldMatcher) throws IOException {
+ public static SearchRequest createSearchRequest(PercolateRequest percolateRequest, BytesReference documentSource,
+ IndicesQueriesRegistry queryRegistry, AggregatorParsers aggParsers,
+ SearchExtRegistry searchExtRegistry, ParseFieldMatcher parseFieldMatcher)
+ throws IOException {
SearchRequest searchRequest = new SearchRequest();
if (percolateRequest.indices() != null) {
searchRequest.indices(percolateRequest.indices());
@@ -220,7 +226,7 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(source)) {
QueryParseContext context = new QueryParseContext(queryRegistry, parser, parseFieldMatcher);
- searchSourceBuilder.parseXContent(context, aggParsers, null);
+ searchSourceBuilder.parseXContent(context, aggParsers, null, searchExtRegistry);
searchRequest.source(searchSourceBuilder);
return searchRequest;
}
@@ -235,7 +241,8 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
matches = new PercolateResponse.Match[hits.getHits().length];
for (int i = 0; i < hits.getHits().length; i++) {
SearchHit hit = hits.getHits()[i];
- matches[i] = new PercolateResponse.Match(new Text(hit.getIndex()), new Text(hit.getId()), hit.getScore(), hit.getHighlightFields());
+ matches[i] = new PercolateResponse.Match(new Text(hit.getIndex()),
+ new Text(hit.getId()), hit.getScore(), hit.getHighlightFields());
}
}
@@ -246,8 +253,8 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
}
return new PercolateResponse(
- searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getFailedShards(),
- shardFailures, matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations()
+ searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getFailedShards(), shardFailures,
+ matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations()
);
}
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java
index 7a51d8a7ab..d103b7d3ec 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java
@@ -21,6 +21,7 @@ package org.elasticsearch.percolator;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
+import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MappingMetaData;
@@ -28,20 +29,26 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.script.MockScriptPlugin;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.Map;
+import java.util.function.Function;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
import static org.elasticsearch.percolator.PercolatorTestUtil.preparePercolate;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
@@ -52,7 +59,7 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
- return Collections.singleton(PercolatorPlugin.class);
+ return Arrays.asList(PercolatorPlugin.class, FoolMeScriptLang.class);
}
@Override
@@ -81,25 +88,43 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase {
.setTypes(".percolator")
.addSort("_uid", SortOrder.ASC)
.get();
- assertThat(searchResponse.getHits().getTotalHits(), equalTo(3L));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(4L));
assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
assertThat(searchResponse.getHits().getAt(1).id(), equalTo("2"));
assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("4"));
+ assertThat(XContentMapValues.extractValue("query.script.script.inline",
+ searchResponse.getHits().getAt(3).sourceAsMap()), equalTo("return true"));
+ // we don't upgrade the script definitions to explicitly include the lang,
+ // because we read / parse the query at search time.
+ assertThat(XContentMapValues.extractValue("query.script.script.lang",
+ searchResponse.getHits().getAt(3).sourceAsMap()), nullValue());
// verify percolate response
PercolateResponse percolateResponse = preparePercolate(client())
+ .setIndices(INDEX_NAME)
+ .setDocumentType("message")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
+ .get();
+
+ assertThat(percolateResponse.getCount(), equalTo(1L));
+ assertThat(percolateResponse.getMatches().length, equalTo(1));
+ assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("4"));
+
+ percolateResponse = preparePercolate(client())
.setIndices(INDEX_NAME)
.setDocumentType("message")
.setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("message", "the quick brown fox jumps over the lazy dog"))
.get();
- assertThat(percolateResponse.getCount(), equalTo(2L));
- assertThat(percolateResponse.getMatches().length, equalTo(2));
+ assertThat(percolateResponse.getCount(), equalTo(3L));
+ assertThat(percolateResponse.getMatches().length, equalTo(3));
assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1"));
assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2"));
+ assertThat(percolateResponse.getMatches()[2].getId().string(), equalTo("4"));
// add an extra query and verify the results
- client().prepareIndex(INDEX_NAME, ".percolator", "4")
+ client().prepareIndex(INDEX_NAME, ".percolator", "5")
.setSource(jsonBuilder().startObject().field("query", matchQuery("message", "fox jumps")).endObject())
.get();
refresh();
@@ -110,25 +135,39 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase {
.setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("message", "the quick brown fox jumps over the lazy dog"))
.get();
- assertThat(percolateResponse.getCount(), equalTo(3L));
- assertThat(percolateResponse.getMatches().length, equalTo(3));
+ assertThat(percolateResponse.getCount(), equalTo(4L));
+ assertThat(percolateResponse.getMatches().length, equalTo(4));
assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1"));
assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2"));
assertThat(percolateResponse.getMatches()[2].getId().string(), equalTo("4"));
}
private void setupNode() throws Exception {
- Path dataDir = createTempDir();
- Path clusterDir = Files.createDirectory(dataDir.resolve(cluster().getClusterName()));
+ Path clusterDir = createTempDir();
try (InputStream stream = PercolatorBackwardsCompatibilityTests.class.
getResourceAsStream("/indices/percolator/bwc_index_2.0.0.zip")) {
TestUtil.unzip(stream, clusterDir);
}
Settings.Builder nodeSettings = Settings.builder()
- .put(Environment.PATH_DATA_SETTING.getKey(), dataDir);
+ .put(Environment.PATH_DATA_SETTING.getKey(), clusterDir);
internalCluster().startNode(nodeSettings.build());
ensureGreen(INDEX_NAME);
}
+ // Fool the script service into thinking this is the groovy script language,
+ // so that a script with no lang defined implicitly runs against the legacy language:
+ public static class FoolMeScriptLang extends MockScriptPlugin {
+
+ @Override
+ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
+ return Collections.singletonMap("return true", (vars) -> true);
+ }
+
+ @Override
+ public String pluginScriptLang() {
+ return "groovy";
+ }
+ }
+
}
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java
index 57dda2f55c..621cb07d3c 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java
@@ -32,13 +32,21 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext;
@@ -46,6 +54,8 @@ import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.BoostingQueryBuilder;
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
+import org.elasticsearch.index.query.HasChildQueryBuilder;
+import org.elasticsearch.index.query.HasParentQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
@@ -55,7 +65,11 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.script.MockScriptPlugin;
+import org.elasticsearch.script.Script;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.io.IOException;
@@ -64,7 +78,10 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;
@@ -91,7 +108,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
- return Collections.singleton(PercolatorPlugin.class);
+ return pluginList(InternalSettingsPlugin.class, PercolatorPlugin.class, FoolMeScriptPlugin.class);
}
@Before
@@ -429,23 +446,31 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
}
- public void testVerifyRangeQueries() {
+ public void testUnsupportedQueries() {
RangeQueryBuilder rangeQuery1 = new RangeQueryBuilder("field").from("2016-01-01||/D").to("2017-01-01||/D");
RangeQueryBuilder rangeQuery2 = new RangeQueryBuilder("field").from("2016-01-01||/D").to("now");
- PercolatorFieldMapper.verifyRangeQueries(rangeQuery1);
- expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyRangeQueries(rangeQuery2));
- PercolatorFieldMapper.verifyRangeQueries(new BoolQueryBuilder().must(rangeQuery1));
+ PercolatorFieldMapper.verifyQuery(rangeQuery1);
+ expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(rangeQuery2));
+ PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(rangeQuery1));
expectThrows(IllegalArgumentException.class, () ->
- PercolatorFieldMapper.verifyRangeQueries(new BoolQueryBuilder().must(rangeQuery2)));
- PercolatorFieldMapper.verifyRangeQueries(new ConstantScoreQueryBuilder((rangeQuery1)));
+ PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(rangeQuery2)));
+ PercolatorFieldMapper.verifyQuery(new ConstantScoreQueryBuilder((rangeQuery1)));
expectThrows(IllegalArgumentException.class, () ->
- PercolatorFieldMapper.verifyRangeQueries(new ConstantScoreQueryBuilder(rangeQuery2)));
- PercolatorFieldMapper.verifyRangeQueries(new BoostingQueryBuilder(rangeQuery1, new MatchAllQueryBuilder()));
+ PercolatorFieldMapper.verifyQuery(new ConstantScoreQueryBuilder(rangeQuery2)));
+ PercolatorFieldMapper.verifyQuery(new BoostingQueryBuilder(rangeQuery1, new MatchAllQueryBuilder()));
expectThrows(IllegalArgumentException.class, () ->
- PercolatorFieldMapper.verifyRangeQueries(new BoostingQueryBuilder(rangeQuery2, new MatchAllQueryBuilder())));
- PercolatorFieldMapper.verifyRangeQueries(new FunctionScoreQueryBuilder(rangeQuery1, new RandomScoreFunctionBuilder()));
+ PercolatorFieldMapper.verifyQuery(new BoostingQueryBuilder(rangeQuery2, new MatchAllQueryBuilder())));
+ PercolatorFieldMapper.verifyQuery(new FunctionScoreQueryBuilder(rangeQuery1, new RandomScoreFunctionBuilder()));
expectThrows(IllegalArgumentException.class, () ->
- PercolatorFieldMapper.verifyRangeQueries(new FunctionScoreQueryBuilder(rangeQuery2, new RandomScoreFunctionBuilder())));
+ PercolatorFieldMapper.verifyQuery(new FunctionScoreQueryBuilder(rangeQuery2, new RandomScoreFunctionBuilder())));
+
+ HasChildQueryBuilder hasChildQuery = new HasChildQueryBuilder("_type", new MatchAllQueryBuilder(), ScoreMode.None);
+ expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(hasChildQuery));
+ expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(hasChildQuery)));
+
+ HasParentQueryBuilder hasParentQuery = new HasParentQueryBuilder("_type", new MatchAllQueryBuilder(), false);
+ expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(hasParentQuery));
+ expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(hasParentQuery)));
}
private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws IOException {
@@ -454,4 +479,93 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser);
assertThat(qsc.parseInnerQueryBuilder().get(), equalTo(expected));
}
+
+
+ public void testEmptyName() throws Exception {
+ // after 5.x
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("").field("type", "percolator").endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = mapperService.documentMapperParser();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type1", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // before 5.x
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ DocumentMapperParser parser2x = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser();
+
+ DocumentMapper defaultMapper = parser2x.parse("type1", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
+
+ public void testImplicitlySetDefaultScriptLang() throws Exception {
+ addQueryMapping();
+ XContentBuilder query = jsonBuilder();
+ query.startObject();
+ query.startObject("script");
+ if (randomBoolean()) {
+ query.field("script", "return true");
+ } else {
+ query.startObject("script");
+ query.field("inline", "return true");
+ query.endObject();
+ }
+ query.endObject();
+ query.endObject();
+
+ ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
+ XContentFactory.jsonBuilder().startObject()
+ .rawField(fieldName, new BytesArray(query.string()))
+ .endObject().bytes());
+ BytesRef querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
+ Map<String, Object> parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2();
+ assertEquals(Script.DEFAULT_SCRIPT_LANG, XContentMapValues.extractValue("script.script.lang", parsedQuery));
+
+ query = jsonBuilder();
+ query.startObject();
+ query.startObject("function_score");
+ query.startArray("functions");
+ query.startObject();
+ query.startObject("script_score");
+ if (randomBoolean()) {
+ query.field("script", "return true");
+ } else {
+ query.startObject("script");
+ query.field("inline", "return true");
+ query.endObject();
+ }
+ query.endObject();
+ query.endObject();
+ query.endArray();
+ query.endObject();
+ query.endObject();
+
+ doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
+ XContentFactory.jsonBuilder().startObject()
+ .rawField(fieldName, new BytesArray(query.string()))
+ .endObject().bytes());
+ querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
+ parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2();
+ assertEquals(Script.DEFAULT_SCRIPT_LANG,
+ ((List) XContentMapValues.extractValue("function_score.functions.script_score.script.lang", parsedQuery)).get(0));
+ }
+
+    // Lets us store scripts in percolator queries without actually executing them.
+ public static class FoolMeScriptPlugin extends MockScriptPlugin {
+
+ @Override
+ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
+ return Collections.singletonMap("return true", (vars) -> true);
+ }
+
+ @Override
+ public String pluginScriptLang() {
+ return Script.DEFAULT_SCRIPT_LANG;
+ }
+ }
+
}
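
A minimal sketch of the kind of recursive rejection the renamed verifyQuery test exercises (the class and method names below are hypothetical, and the real PercolatorFieldMapper also unwraps constant_score, boosting, and function_score queries):

    import org.elasticsearch.index.query.BoolQueryBuilder;
    import org.elasticsearch.index.query.HasChildQueryBuilder;
    import org.elasticsearch.index.query.HasParentQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilder;

    final class QueryVerifierSketch {
        // Throws IllegalArgumentException for parent/child queries anywhere in the tree.
        static void verify(QueryBuilder query) {
            if (query instanceof HasChildQueryBuilder) {
                throw new IllegalArgumentException("the [has_child] query is unsupported inside a percolator query");
            }
            if (query instanceof HasParentQueryBuilder) {
                throw new IllegalArgumentException("the [has_parent] query is unsupported inside a percolator query");
            }
            if (query instanceof BoolQueryBuilder) {
                BoolQueryBuilder bool = (BoolQueryBuilder) query;
                // recurse into every clause so nested unsupported queries are caught too
                bool.must().forEach(QueryVerifierSketch::verify);
                bool.mustNot().forEach(QueryVerifierSketch::verify);
                bool.should().forEach(QueryVerifierSketch::verify);
                bool.filter().forEach(QueryVerifierSketch::verify);
            }
        }
    }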
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java
index e4a10ce04a..7d10b831bc 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java
@@ -1777,16 +1777,15 @@ public class PercolatorIT extends ESIntegTestCase {
assertThat(response1.getMatches()[0].getId().string(), equalTo("1"));
}
- public void testParentChild() throws Exception {
- // We don't fail p/c queries, but those queries are unusable because only a single document can be provided in
- // the percolate api
-
+ public void testFailParentChild() throws Exception {
assertAcked(prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.addMapping("child", "_parent", "type=parent").addMapping("parent"));
- client().prepareIndex(INDEX_NAME, TYPE_NAME, "1")
+ Exception e = expectThrows(MapperParsingException.class, () -> client().prepareIndex(INDEX_NAME, TYPE_NAME, "1")
.setSource(jsonBuilder().startObject().field("query", hasChildQuery("child", matchAllQuery(), ScoreMode.None)).endObject())
- .execute().actionGet();
+ .get());
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ assertThat(e.getCause().getMessage(), equalTo("the [has_child] query is unsupported inside a percolator query"));
}
public void testPercolateDocumentWithParentField() throws Exception {
diff --git a/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip b/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip
index f0e2d05e4a..43a8cceb19 100644
--- a/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip
+++ b/modules/percolator/src/test/resources/indices/percolator/bwc_index_2.0.0.zip
Binary files differ
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java
index 678ecad149..2625b66eb8 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.reindex;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
@@ -31,7 +32,6 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.ParentTaskAssigningClient;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -64,7 +64,7 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
 * their tests can use them. Most methods run in the listener thread pool because they are meant to be fast and don't expect to block.
*/
public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBulkByScrollRequest<Request>> {
- protected final ESLogger logger;
+ protected final Logger logger;
protected final BulkByScrollTask task;
protected final ThreadPool threadPool;
/**
@@ -81,7 +81,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
private final Retry bulkRetry;
private final ScrollableHitSource scrollSource;
- public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client,
+ public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, Request mainRequest, ActionListener<BulkIndexByScrollResponse> listener) {
this.task = task;
this.logger = logger;
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java
index 589a703646..1f135500df 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.reindex;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.bulk.BulkRequest;
@@ -26,7 +27,6 @@ import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.IdFieldMapper;
@@ -71,7 +71,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
*/
private final BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> scriptApplier;
- public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client,
+ public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, Request mainRequest,
ActionListener<BulkIndexByScrollResponse> listener,
ScriptService scriptService, ClusterState clusterState) {
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
index a547c5303b..ade1f8c2f8 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.reindex;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -49,7 +49,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
* Task storing information about a currently running BulkByScroll request.
*/
public class BulkByScrollTask extends CancellableTask {
- private static final ESLogger logger = ESLoggerFactory.getLogger(BulkByScrollTask.class.getPackage().getName());
+ private static final Logger logger = ESLoggerFactory.getLogger(BulkByScrollTask.class.getPackage().getName());
/**
* The total number of documents this request will process. 0 means we don't yet know or, possibly, there are actually 0 documents
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
index 030753e941..4d5f762340 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
@@ -19,6 +19,9 @@
package org.elasticsearch.index.reindex;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
@@ -31,7 +34,6 @@ import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@@ -60,7 +62,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
private final ParentTaskAssigningClient client;
private final SearchRequest firstSearchRequest;
- public ClientScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
+ public ClientScrollableHitSource(Logger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
Consumer<Exception> fail, ParentTaskAssigningClient client, SearchRequest firstSearchRequest) {
super(logger, backoffPolicy, threadPool, countSearchRetry, fail);
this.client = client;
@@ -105,7 +107,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
@Override
public void onFailure(Exception e) {
- logger.warn("Failed to clear scroll [{}]", e, scrollId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
}
});
}
@@ -144,11 +146,13 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
if (retries.hasNext()) {
retryCount += 1;
TimeValue delay = retries.next();
- logger.trace("retrying rejected search after [{}]", e, delay);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
countSearchRetry.run();
threadPool.schedule(delay, ThreadPool.Names.SAME, this);
} else {
- logger.warn("giving up on search because we retried [{}] times without success", e, retryCount);
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "giving up on search because we retried [{}] times without success", retryCount), e);
fail.accept(e);
}
} else {
@@ -242,7 +246,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
public Long getTTL() {
return fieldValue(TTLFieldMapper.NAME);
}
-
+
private <T> T fieldValue(String fieldName) {
SearchHitField field = delegate.field(fieldName);
return field == null ? null : field.value();
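
The ESLogger overloads that took the exception before the format arguments are gone; a self-contained sketch of the Log4j 2 replacement pattern used throughout this hunk (class name and values are illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    public class LazyLoggingExample {
        private static final Logger logger = LogManager.getLogger(LazyLoggingExample.class);

        public static void main(String[] args) {
            String scrollId = "c2Nhbjsx"; // illustrative value
            Exception failure = new RuntimeException("boom");
            // The Supplier cast picks the lazy overload: the message is only
            // formatted when WARN is enabled, and the exception stays last.
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), failure);
        }
    }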
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java
index a8fe4f0311..abec32de99 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java
@@ -42,14 +42,11 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
-import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.SearchRequestParsers;
-import org.elasticsearch.search.aggregations.AggregatorParsers;
-import org.elasticsearch.search.suggest.Suggesters;
import java.io.IOException;
import java.util.List;
@@ -87,7 +84,8 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexReq
builder.map(source);
try (XContentParser innerParser = parser.contentType().xContent().createParser(builder.bytes())) {
request.getSearchRequest().source().parseXContent(context.queryParseContext(innerParser),
- context.searchRequestParsers.aggParsers, context.searchRequestParsers.suggesters);
+ context.searchRequestParsers.aggParsers, context.searchRequestParsers.suggesters,
+ context.searchRequestParsers.searchExtParsers);
}
};
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java
index da601fca08..0b4b66222b 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.reindex;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.search.ShardSearchFailure;
@@ -28,7 +29,6 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -50,13 +50,13 @@ import static java.util.Objects.requireNonNull;
public abstract class ScrollableHitSource implements Closeable {
private final AtomicReference<String> scrollId = new AtomicReference<>();
- protected final ESLogger logger;
+ protected final Logger logger;
protected final BackoffPolicy backoffPolicy;
protected final ThreadPool threadPool;
protected final Runnable countSearchRetry;
protected final Consumer<Exception> fail;
- public ScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
+ public ScrollableHitSource(Logger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
Consumer<Exception> fail) {
this.logger = logger;
this.backoffPolicy = backoffPolicy;
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java
index df07bd3485..99362e75f9 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.reindex;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.support.ActionFilters;
@@ -29,7 +30,6 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.tasks.Task;
@@ -68,7 +68,7 @@ public class TransportDeleteByQueryAction extends HandledTransportAction<DeleteB
*/
static class AsyncDeleteBySearchAction extends AbstractAsyncBulkIndexByScrollAction<DeleteByQueryRequest> {
- public AsyncDeleteBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
+ public AsyncDeleteBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
DeleteByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener,
ScriptService scriptService, ClusterState clusterState) {
super(task, logger, client, threadPool, request, listener, scriptService, clusterState);
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java
index 57d29283bb..33aca02835 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java
@@ -27,6 +27,7 @@ import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.reactor.IOReactorConfig;
import org.apache.http.message.BasicHeader;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BackoffPolicy;
@@ -44,7 +45,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -226,7 +226,7 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
*/
private List<Thread> createdThreads = emptyList();
- public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
+ public AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
ReindexRequest request, ActionListener<BulkIndexByScrollResponse> listener,
ScriptService scriptService, ClusterState clusterState) {
super(task, logger, client, threadPool, request, listener, scriptService, clusterState);
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java
index 79c013482e..0f4bf5695d 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.reindex;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
@@ -29,7 +30,6 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.IdFieldMapper;
@@ -81,7 +81,7 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
*/
static class AsyncIndexBySearchAction extends AbstractAsyncBulkIndexByScrollAction<UpdateByQueryRequest> {
- public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
+ public AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
UpdateByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener,
ScriptService scriptService, ClusterState clusterState) {
super(task, logger, client, threadPool, request, listener, scriptService, clusterState);
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java
index 00c9f0ae50..b2700618f0 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java
@@ -89,10 +89,10 @@ final class RemoteRequestBuilders {
params.put("sorts", sorts.toString());
}
}
- if (searchRequest.source().storedFields() != null && false == searchRequest.source().storedFields().isEmpty()) {
- StringBuilder fields = new StringBuilder(searchRequest.source().storedFields().get(0));
- for (int i = 1; i < searchRequest.source().storedFields().size(); i++) {
- fields.append(',').append(searchRequest.source().storedFields().get(i));
+ if (searchRequest.source().storedFields() != null && false == searchRequest.source().storedFields().fieldNames().isEmpty()) {
+ StringBuilder fields = new StringBuilder(searchRequest.source().storedFields().fieldNames().get(0));
+ for (int i = 1; i < searchRequest.source().storedFields().fieldNames().size(); i++) {
+ fields.append(',').append(searchRequest.source().storedFields().fieldNames().get(i));
}
String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? "fields" : "stored_fields";
params.put(storedFieldsParamName, fields.toString());
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java
index 3ae1f33df5..7ecec0aa19 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java
@@ -47,10 +47,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
- * Parsers to convert the response from the remote host into objects useful for {@link RemoteScrollableHitSource}. Lots of data is
- * intentionally thrown on the floor because we don't need it but ObjectParser and friends are strict about blowing up when they see
- * elements they don't understand. So you'll see a lot of BiConsumers that look like "(b, v) -&gt; {}". That means "I don't care about the
- * value here, just throw it away and don't blow up.
+ * Parsers to convert the response from the remote host into objects useful for {@link RemoteScrollableHitSource}.
*/
final class RemoteResponseParsers {
private RemoteResponseParsers() {}
@@ -58,8 +55,8 @@ final class RemoteResponseParsers {
/**
* Parser for an individual {@code hit} element.
*/
- public static final ConstructingObjectParser<BasicHit, ParseFieldMatcherSupplier> HIT_PARSER = new ConstructingObjectParser<>("hit",
- a -> {
+ public static final ConstructingObjectParser<BasicHit, ParseFieldMatcherSupplier> HIT_PARSER =
+ new ConstructingObjectParser<>("hit", true, a -> {
int i = 0;
String index = (String) a[i++];
String type = (String) a[i++];
@@ -90,26 +87,23 @@ final class RemoteResponseParsers {
HIT_PARSER.declareString(BasicHit::setParent, new ParseField("_parent"));
HIT_PARSER.declareLong(BasicHit::setTTL, new ParseField("_ttl"));
HIT_PARSER.declareLong(BasicHit::setTimestamp, new ParseField("_timestamp"));
- HIT_PARSER.declareField((b, v) -> {}, p -> null, new ParseField("_score"), ValueType.FLOAT_OR_NULL);
- HIT_PARSER.declareStringArray((b, v) -> {}, new ParseField("sort"));
}
/**
* Parser for the {@code hits} element. Parsed to an array of {@code [total (Long), hits (List<Hit>)]}.
*/
- public static final ConstructingObjectParser<Object[], ParseFieldMatcherSupplier> HITS_PARSER = new ConstructingObjectParser<>("hits",
- a -> a);
+ public static final ConstructingObjectParser<Object[], ParseFieldMatcherSupplier> HITS_PARSER =
+ new ConstructingObjectParser<>("hits", true, a -> a);
static {
HITS_PARSER.declareLong(constructorArg(), new ParseField("total"));
HITS_PARSER.declareObjectArray(constructorArg(), HIT_PARSER, new ParseField("hits"));
- HITS_PARSER.declareField((b, v) -> {}, p -> null, new ParseField("max_score"), ValueType.FLOAT_OR_NULL);
}
/**
* Parser for {@code failed} shards in the {@code _shards} elements.
*/
public static final ConstructingObjectParser<SearchFailure, ParseFieldMatcherSupplier> SEARCH_FAILURE_PARSER =
- new ConstructingObjectParser<>("failure", a -> {
+ new ConstructingObjectParser<>("failure", true, a -> {
int i = 0;
String index = (String) a[i++];
Integer shardId = (Integer) a[i++];
@@ -135,7 +129,6 @@ final class RemoteResponseParsers {
return p.text();
}
}, new ParseField("reason"), ValueType.OBJECT_OR_STRING);
- SEARCH_FAILURE_PARSER.declareInt((b, v) -> {}, new ParseField("status"));
}
/**
@@ -143,7 +136,7 @@ final class RemoteResponseParsers {
* parses to an empty list.
*/
public static final ConstructingObjectParser<List<Throwable>, ParseFieldMatcherSupplier> SHARDS_PARSER =
- new ConstructingObjectParser<>("_shards", a -> {
+ new ConstructingObjectParser<>("_shards", true, a -> {
@SuppressWarnings("unchecked")
List<Throwable> failures = (List<Throwable>) a[0];
failures = failures == null ? emptyList() : failures;
@@ -151,13 +144,10 @@ final class RemoteResponseParsers {
});
static {
SHARDS_PARSER.declareObjectArray(optionalConstructorArg(), SEARCH_FAILURE_PARSER, new ParseField("failures"));
- SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("total"));
- SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("successful"));
- SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("failed"));
}
public static final ConstructingObjectParser<Response, ParseFieldMatcherSupplier> RESPONSE_PARSER =
- new ConstructingObjectParser<>("search_response", a -> {
+ new ConstructingObjectParser<>("search_response", true, a -> {
int i = 0;
Throwable catastrophicFailure = (Throwable) a[i++];
if (catastrophicFailure != null) {
@@ -189,9 +179,6 @@ final class RemoteResponseParsers {
RESPONSE_PARSER.declareString(optionalConstructorArg(), new ParseField("_scroll_id"));
RESPONSE_PARSER.declareObject(optionalConstructorArg(), HITS_PARSER, new ParseField("hits"));
RESPONSE_PARSER.declareObject(optionalConstructorArg(), SHARDS_PARSER, new ParseField("_shards"));
- RESPONSE_PARSER.declareInt((b, v) -> {}, new ParseField("took"));
- RESPONSE_PARSER.declareBoolean((b, v) -> {}, new ParseField("terminated_early"));
- RESPONSE_PARSER.declareInt((b, v) -> {}, new ParseField("status"));
}
/**
@@ -200,7 +187,7 @@ final class RemoteResponseParsers {
public static class ThrowableBuilder {
public static final BiFunction<XContentParser, ParseFieldMatcherSupplier, Throwable> PARSER;
static {
- ObjectParser<ThrowableBuilder, ParseFieldMatcherSupplier> parser = new ObjectParser<>("reason", ThrowableBuilder::new);
+ ObjectParser<ThrowableBuilder, ParseFieldMatcherSupplier> parser = new ObjectParser<>("reason", true, ThrowableBuilder::new);
PARSER = parser.andThen(ThrowableBuilder::build);
parser.declareString(ThrowableBuilder::setType, new ParseField("type"));
parser.declareString(ThrowableBuilder::setReason, new ParseField("reason"));
@@ -209,14 +196,6 @@ final class RemoteResponseParsers {
// So we can give a nice error for parsing exceptions
parser.declareInt(ThrowableBuilder::setLine, new ParseField("line"));
parser.declareInt(ThrowableBuilder::setColumn, new ParseField("col"));
-
- // So we don't blow up on search exceptions
- parser.declareString((b, v) -> {}, new ParseField("phase"));
- parser.declareBoolean((b, v) -> {}, new ParseField("grouped"));
- parser.declareField((p, v, c) -> p.skipChildren(), new ParseField("failed_shards"), ValueType.OBJECT_ARRAY);
-
- // Just throw away the root_cause
- parser.declareField((p, v, c) -> p.skipChildren(), new ParseField("root_cause"), ValueType.OBJECT_ARRAY);
}
private String type;
@@ -270,32 +249,14 @@ final class RemoteResponseParsers {
}
/**
- * Parses the {@code version} field of the main action. There are a surprising number of fields in this that we don't need!
- */
- public static final ConstructingObjectParser<Version, ParseFieldMatcherSupplier> VERSION_PARSER = new ConstructingObjectParser<>(
- "version", a -> Version.fromString((String) a[0]));
- static {
- VERSION_PARSER.declareString(constructorArg(), new ParseField("number"));
- VERSION_PARSER.declareBoolean((p, v) -> {}, new ParseField("snapshot_build"));
- VERSION_PARSER.declareBoolean((p, v) -> {}, new ParseField("build_snapshot"));
- VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_hash"));
- VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_date"));
- VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_timestamp"));
- VERSION_PARSER.declareString((p, v) -> {}, new ParseField("lucene_version"));
- }
-
- /**
 * Parses the main action response and returns just the {@linkplain Version} it reports; everything else is thrown out.
*/
public static final ConstructingObjectParser<Version, ParseFieldMatcherSupplier> MAIN_ACTION_PARSER = new ConstructingObjectParser<>(
- "/", a -> (Version) a[0]);
+ "/", true, a -> (Version) a[0]);
static {
- MAIN_ACTION_PARSER.declareBoolean((p, v) -> {}, new ParseField("ok"));
- MAIN_ACTION_PARSER.declareInt((p, v) -> {}, new ParseField("status"));
- MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name"));
- MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("cluster_name"));
- MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name"));
- MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("tagline"));
- MAIN_ACTION_PARSER.declareObject(constructorArg(), VERSION_PARSER, new ParseField("version"));
+ ConstructingObjectParser<Version, ParseFieldMatcherSupplier> versionParser = new ConstructingObjectParser<>(
+ "version", true, a -> Version.fromString((String) a[0]));
+ versionParser.declareString(constructorArg(), new ParseField("number"));
+ MAIN_ACTION_PARSER.declareObject(constructorArg(), versionParser, new ParseField("version"));
}
}
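
The second constructor argument (true) switches these parsers into lenient mode; a hedged sketch of the effect (the parser name and field here are illustrative):

    import org.elasticsearch.common.ParseField;
    import org.elasticsearch.common.ParseFieldMatcherSupplier;
    import org.elasticsearch.common.xcontent.ConstructingObjectParser;

    import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

    final class LenientParserSketch {
        // With ignoreUnknownFields=true, fields without a declaration ("took",
        // "status", ...) are silently skipped instead of failing the parse, so the
        // explicit "(b, v) -> {}" throwaway declarations above became unnecessary.
        static final ConstructingObjectParser<String, ParseFieldMatcherSupplier> NAME_PARSER =
                new ConstructingObjectParser<>("example", true, a -> (String) a[0]);
        static {
            NAME_PARSER.declareString(constructorArg(), new ParseField("name"));
        }
    }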
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java
index 41f6dd5f94..207948c921 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java
@@ -21,6 +21,9 @@ package org.elasticsearch.index.reindex.remote;
import org.apache.http.HttpEntity;
import org.apache.http.util.EntityUtils;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.Version;
@@ -34,7 +37,6 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -70,7 +72,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource {
private final SearchRequest searchRequest;
Version remoteVersion;
- public RemoteScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
+ public RemoteScrollableHitSource(Logger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
Consumer<Exception> fail, RestClient client, BytesReference query, SearchRequest searchRequest) {
super(logger, backoffPolicy, threadPool, countSearchRetry, fail);
this.query = query;
@@ -98,7 +100,6 @@ public class RemoteScrollableHitSource extends ScrollableHitSource {
void lookupRemoteVersion(Consumer<Version> onVersion) {
execute("GET", "", emptyMap(), null, MAIN_ACTION_PARSER, onVersion);
-
}
private void onStartResponse(Consumer<? super Response> onResponse, Response response) {
@@ -119,7 +120,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource {
@Override
protected void clearScroll(String scrollId) {
// Need to throw out the response...
- client.performRequest("DELETE", scrollPath(), emptyMap(), scrollEntity(scrollId), new ResponseListener() {
+ client.performRequestAsync("DELETE", scrollPath(), emptyMap(), scrollEntity(scrollId), new ResponseListener() {
@Override
public void onSuccess(org.elasticsearch.client.Response response) {
logger.debug("Successfully cleared [{}]", scrollId);
@@ -127,7 +128,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource {
@Override
public void onFailure(Exception t) {
- logger.warn("Failed to clear scroll [{}]", t, scrollId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), t);
}
});
}
@@ -141,7 +142,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource {
@Override
protected void doRun() throws Exception {
- client.performRequest(method, uri, params, entity, new ResponseListener() {
+ client.performRequestAsync(method, uri, params, entity, new ResponseListener() {
@Override
public void onSuccess(org.elasticsearch.client.Response response) {
// Restore the thread context to get the precious headers
@@ -174,7 +175,8 @@ public class RemoteScrollableHitSource extends ScrollableHitSource {
if (RestStatus.TOO_MANY_REQUESTS.getStatus() == re.getResponse().getStatusLine().getStatusCode()) {
if (retries.hasNext()) {
TimeValue delay = retries.next();
- logger.trace("retrying rejected search after [{}]", e, delay);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
countSearchRetry.run();
threadPool.schedule(delay, ThreadPool.Names.SAME, RetryHelper.this);
return;
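
A self-contained sketch of the synchronous-to-asynchronous client change in this file (endpoint, entity, and handler bodies are illustrative):

    import java.util.Collections;
    import org.apache.http.HttpEntity;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.ResponseListener;
    import org.elasticsearch.client.RestClient;

    final class AsyncScrollCleanup {
        // performRequestAsync returns immediately; success or failure is reported
        // through the listener on the client's callback thread instead of blocking.
        static void clearScroll(RestClient client, HttpEntity scrollEntity) {
            client.performRequestAsync("DELETE", "/_search/scroll", Collections.emptyMap(), scrollEntity,
                    new ResponseListener() {
                        @Override
                        public void onSuccess(Response response) {
                            // the scroll was cleared
                        }

                        @Override
                        public void onFailure(Exception exception) {
                            // connection problems and non-2xx responses land here
                        }
                    });
        }
    }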
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java
index 1213762155..12ed0ed090 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java
@@ -31,10 +31,13 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.test.ESTestCase;
+import java.util.Collections;
+
import static java.util.Collections.emptyMap;
import static org.hamcrest.Matchers.containsString;
@@ -54,7 +57,8 @@ public class ReindexSourceTargetValidationTests extends ESTestCase {
.put(index("source", "source_multi"), true)
.put(index("source2", "source_multi"), true)).build();
private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY);
- private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER);
+ private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY,
+ new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), INDEX_NAME_EXPRESSION_RESOLVER);
public void testObviousCases() {
fails("target", "target");
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java
index f8cbee1732..336d88d3e1 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java
@@ -110,7 +110,7 @@ public class RestReindexActionTests extends ESTestCase {
}
try (XContentParser p = JsonXContent.jsonXContent.createParser(request)) {
ReindexRequest r = new ReindexRequest(new SearchRequest(), new IndexRequest());
- SearchRequestParsers searchParsers = new SearchRequestParsers(new IndicesQueriesRegistry(), null, null);
+ SearchRequestParsers searchParsers = new SearchRequestParsers(new IndicesQueriesRegistry(), null, null, null);
RestReindexAction.PARSER.parse(p, r, new ReindexParseContext(searchParsers, ParseFieldMatcher.STRICT));
assertEquals("localhost", r.getRemoteInfo().getHost());
assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices());
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java
index c0c1d681a0..92e2598a5a 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java
@@ -28,7 +28,6 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@@ -43,7 +42,6 @@ import org.junit.Before;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.List;
import java.util.concurrent.CyclicBarrier;
@@ -63,12 +61,12 @@ public class RetryTests extends ESSingleNodeTestCase {
private List<CyclicBarrier> blockedExecutors = new ArrayList<>();
- private boolean useNetty4;
+ private boolean useNetty3;
@Before
public void setUp() throws Exception {
super.setUp();
- useNetty4 = randomBoolean();
+ useNetty3 = randomBoolean();
createIndex("source");
// Build the test data. Don't use indexRandom because that won't work consistently with such small thread pools.
BulkRequestBuilder bulk = client().prepareBulk();
@@ -112,9 +110,9 @@ public class RetryTests extends ESSingleNodeTestCase {
settings.put(NetworkModule.HTTP_ENABLED.getKey(), true);
// Whitelist reindexing from the http host we're going to use
settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself");
- if (useNetty4) {
- settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME);
- settings.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME);
+ if (useNetty3) {
+ settings.put(NetworkModule.HTTP_TYPE_KEY, Netty3Plugin.NETTY_HTTP_TRANSPORT_NAME);
+ settings.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME);
}
return settings.build();
}
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java
index b0fc9b428b..1a262a32d3 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java
@@ -206,14 +206,6 @@ public class RoundTripTests extends ESTestCase {
emptyMap()); // Params
}
- private long randomPositiveLong() {
- long l;
- do {
- l = randomLong();
- } while (l < 0);
- return l;
- }
-
private void assertResponseEquals(BulkIndexByScrollResponse expected, BulkIndexByScrollResponse actual) {
assertEquals(expected.getTook(), actual.getTook());
assertTaskStatusEquals(expected.getStatus(), actual.getStatus());
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
index 6407bc0195..351eb49f90 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
@@ -113,11 +113,42 @@ public class RemoteScrollableHitSourceTests extends ESTestCase {
}
public void testLookupRemoteVersion() throws Exception {
- sourceWithMockedRemoteCall(false, "main/0_20_5.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("0.20.5"), v));
- sourceWithMockedRemoteCall(false, "main/0_90_13.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("0.90.13"), v));
- sourceWithMockedRemoteCall(false, "main/1_7_5.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("1.7.5"), v));
- sourceWithMockedRemoteCall(false, "main/2_3_3.json").lookupRemoteVersion(v -> assertEquals(Version.V_2_3_3, v));
- sourceWithMockedRemoteCall(false, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> assertEquals(Version.V_5_0_0_alpha3, v));
+ AtomicBoolean called = new AtomicBoolean();
+ sourceWithMockedRemoteCall(false, "main/0_20_5.json").lookupRemoteVersion(v -> {
+ assertEquals(Version.fromString("0.20.5"), v);
+ called.set(true);
+ });
+ assertTrue(called.get());
+ called.set(false);
+ sourceWithMockedRemoteCall(false, "main/0_90_13.json").lookupRemoteVersion(v -> {
+ assertEquals(Version.fromString("0.90.13"), v);
+ called.set(true);
+ });
+ assertTrue(called.get());
+ called.set(false);
+ sourceWithMockedRemoteCall(false, "main/1_7_5.json").lookupRemoteVersion(v -> {
+ assertEquals(Version.fromString("1.7.5"), v);
+ called.set(true);
+ });
+ assertTrue(called.get());
+ called.set(false);
+ sourceWithMockedRemoteCall(false, "main/2_3_3.json").lookupRemoteVersion(v -> {
+ assertEquals(Version.V_2_3_3, v);
+ called.set(true);
+ });
+ assertTrue(called.get());
+ called.set(false);
+ sourceWithMockedRemoteCall(false, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> {
+ assertEquals(Version.V_5_0_0_alpha3, v);
+ called.set(true);
+ });
+ assertTrue(called.get());
+ called.set(false);
+ sourceWithMockedRemoteCall(false, "main/with_unknown_fields.json").lookupRemoteVersion(v -> {
+ assertEquals(Version.V_5_0_0_alpha3, v);
+ called.set(true);
+ });
+ assertTrue(called.get());
}
public void testParseStartOk() throws Exception {
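
The rewritten test pins down that each version consumer actually ran; a minimal sketch of that callback-assertion pattern outside the test class (names are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.Consumer;

    public class CallbackAssertionExample {
        static void lookup(Consumer<String> onVersion) {
            onVersion.accept("5.0.0-alpha3"); // stand-in for the mocked remote call
        }

        public static void main(String[] args) {
            AtomicBoolean called = new AtomicBoolean();
            lookup(v -> called.set(true));
            // Without this check, a consumer that is silently never invoked
            // would let the assertions inside the lambda pass vacuously.
            if (called.get() == false) {
                throw new AssertionError("the version callback was never invoked");
            }
        }
    }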
diff --git a/modules/reindex/src/test/resources/responses/main/with_unknown_fields.json b/modules/reindex/src/test/resources/responses/main/with_unknown_fields.json
new file mode 100644
index 0000000000..6aec249631
--- /dev/null
+++ b/modules/reindex/src/test/resources/responses/main/with_unknown_fields.json
@@ -0,0 +1,22 @@
+{
+ "name" : "Crazy Node With Weird Stuff In The Response",
+ "cluster_name" : "distribution_run",
+ "cats": "knock things over",
+ "cake": "is tasty",
+ "version" : {
+ "number" : "5.0.0-alpha3",
+ "build_hash" : "42e092f",
+ "build_date" : "2016-05-26T16:55:45.405Z",
+ "build_snapshot" : true,
+ "lucene_version" : "6.0.0",
+ "blort_version" : "not even a valid version number, what are you going to do about it?"
+ },
+ "tagline" : "You Know, for Search",
+ "extra_object" : {
+ "stuff": "stuff"
+ },
+ "extra_array" : [
+ "stuff",
+ "more stuff"
+ ]
+}
diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml
index 14b4ae99ea..fc6081a3de 100644
--- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml
+++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml
@@ -120,7 +120,7 @@
- set: {task: task}
- do:
- reindex.rethrottle:
+ reindex_rethrottle:
requests_per_second: -1
task_id: $task
@@ -180,7 +180,7 @@
- set: {task: task}
- do:
- reindex.rethrottle:
+ reindex_rethrottle:
requests_per_second: 1
task_id: $task
diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml
index 74ff3f6f61..27cdecf93f 100644
--- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml
+++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml
@@ -142,7 +142,7 @@
- set: {task: task}
- do:
- reindex.rethrottle:
+ reindex_rethrottle:
requests_per_second: -1
task_id: $task
@@ -197,7 +197,7 @@
- set: {task: task}
- do:
- reindex.rethrottle:
+ reindex_rethrottle:
requests_per_second: 1
task_id: $task
diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml
index 59ca0976da..eb64bd8d38 100644
--- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml
+++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml
@@ -108,7 +108,7 @@
- set: {task: task}
- do:
- reindex.rethrottle:
+ reindex_rethrottle:
requests_per_second: -1
task_id: $task
@@ -155,7 +155,7 @@
- set: {task: task}
- do:
- reindex.rethrottle:
+ reindex_rethrottle:
requests_per_second: 1
task_id: $task
diff --git a/modules/transport-netty3/build.gradle b/modules/transport-netty3/build.gradle
index e13170e5c2..eae0608f92 100644
--- a/modules/transport-netty3/build.gradle
+++ b/modules/transport-netty3/build.gradle
@@ -123,5 +123,5 @@ thirdPartyAudit.excludes = [
// from org.jboss.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
'org.slf4j.Logger',
- 'org.slf4j.LoggerFactory',
+ 'org.slf4j.LoggerFactory'
]
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java
index b7934da00e..c60f47ee3d 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java
@@ -21,9 +21,10 @@ package org.elasticsearch.http.netty3;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
@@ -147,9 +148,9 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
Setting.byteSizeSetting("transport.netty.receive_predictor_size",
settings -> {
long defaultReceiverPredictor = 512 * 1024;
- if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
+ if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes() > 0) {
// we can guess a better default...
- long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / SETTING_HTTP_WORKER_COUNT.get
+ long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes()) / SETTING_HTTP_WORKER_COUNT.get
(settings));
defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
}
@@ -216,7 +217,6 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
private final Netty3CorsConfig corsConfig;
- @Inject
public Netty3HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool) {
super(settings);
this.networkService = networkService;
@@ -246,11 +246,11 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for netty's default values; we can use higher ones, even a fixed size.
ByteSizeValue receivePredictorMin = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
ByteSizeValue receivePredictorMax = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
- if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
- receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
+ if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) {
+ receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.getBytes());
} else {
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory(
- (int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
+ (int) receivePredictorMin.getBytes(), (int) receivePredictorMin.getBytes(), (int) receivePredictorMax.getBytes());
}
this.compression = SETTING_HTTP_COMPRESSION.get(settings);
@@ -260,7 +260,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
this.corsConfig = buildCorsConfig(settings);
// validate max content length
- if (maxContentLength.bytes() > Integer.MAX_VALUE) {
+ if (maxContentLength.getBytes() > Integer.MAX_VALUE) {
logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength);
maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
}
@@ -285,25 +285,25 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
this.serverOpenChannels = new Netty3OpenChannelsHandler(logger);
if (blockingServer) {
serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
- Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
- Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker"))
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)),
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))
));
} else {
serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
- Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
- Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker")),
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)),
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)),
workerCount));
}
serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory());
serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
- if (tcpSendBufferSize.bytes() > 0) {
+ if (tcpSendBufferSize.getBytes() > 0) {
- serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
+ serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes());
}
- if (tcpReceiveBufferSize.bytes() > 0) {
- serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
+ if (tcpReceiveBufferSize.getBytes() > 0) {
+ serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes());
}
serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
@@ -466,7 +466,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
if (boundTransportAddress == null) {
return null;
}
- return new HttpInfo(boundTransportAddress, maxContentLength.bytes());
+ return new HttpInfo(boundTransportAddress, maxContentLength.getBytes());
}
@Override
@@ -495,10 +495,18 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
return;
}
if (!NetworkExceptionHelper.isCloseConnectionException(e.getCause())) {
- logger.warn("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Caught exception while handling client http traffic, closing connection {}",
+ ctx.getChannel()),
+ e.getCause());
ctx.getChannel().close();
} else {
- logger.debug("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Caught exception while handling client http traffic, closing connection {}",
+ ctx.getChannel()),
+ e.getCause());
ctx.getChannel().close();
}
}
@@ -523,15 +531,15 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("openChannels", transport.serverOpenChannels);
HttpRequestDecoder requestDecoder = new HttpRequestDecoder(
- (int) transport.maxInitialLineLength.bytes(),
- (int) transport.maxHeaderSize.bytes(),
- (int) transport.maxChunkSize.bytes()
+ (int) transport.maxInitialLineLength.getBytes(),
+ (int) transport.maxHeaderSize.getBytes(),
+ (int) transport.maxChunkSize.getBytes()
);
- if (transport.maxCumulationBufferCapacity.bytes() >= 0) {
- if (transport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
+ if (transport.maxCumulationBufferCapacity.getBytes() >= 0) {
+ if (transport.maxCumulationBufferCapacity.getBytes() > Integer.MAX_VALUE) {
requestDecoder.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
} else {
- requestDecoder.setMaxCumulationBufferCapacity((int) transport.maxCumulationBufferCapacity.bytes());
+ requestDecoder.setMaxCumulationBufferCapacity((int) transport.maxCumulationBufferCapacity.getBytes());
}
}
if (transport.maxCompositeBufferComponents != -1) {
@@ -539,7 +547,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
}
pipeline.addLast("decoder", requestDecoder);
pipeline.addLast("decoder_compress", new HttpContentDecompressor());
- HttpChunkAggregator httpChunkAggregator = new HttpChunkAggregator((int) transport.maxContentLength.bytes());
+ HttpChunkAggregator httpChunkAggregator = new HttpChunkAggregator((int) transport.maxContentLength.getBytes());
if (transport.maxCompositeBufferComponents != -1) {
httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
}
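
The bulk of this hunk is a mechanical rename of ByteSizeValue#bytes() to getBytes(); the returned long is unchanged, so call sites only swap the accessor and keep their existing narrowing logic. A minimal sketch of the pattern, assuming a hypothetical setting name used purely for illustration:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.ByteSizeValue;

    // Sketch of the accessor rename; "example.buffer_size" is a hypothetical
    // setting, not one defined in this change.
    public class ByteSizeRenameExample {
        public static void main(String[] args) {
            Settings settings = Settings.builder().put("example.buffer_size", "32kb").build();
            ByteSizeValue size = settings.getAsBytesSize("example.buffer_size", new ByteSizeValue(16 * 1024));
            long bytes = size.getBytes(); // was size.bytes() before this change
            if (bytes > 0 && bytes <= Integer.MAX_VALUE) {
                System.out.println(Math.toIntExact(bytes)); // safe narrowing to int
            }
        }
    }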
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java
index a1f9985c6a..d63876335c 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java
@@ -19,19 +19,26 @@
package org.elasticsearch.transport;
-import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.netty3.Netty3HttpServerTransport;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.netty3.Netty3Transport;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
-public class Netty3Plugin extends Plugin {
+public class Netty3Plugin extends Plugin implements NetworkPlugin {
public static final String NETTY_TRANSPORT_NAME = "netty3";
public static final String NETTY_HTTP_TRANSPORT_NAME = "netty3";
@@ -57,11 +64,20 @@ public class Netty3Plugin extends Plugin {
);
}
- public void onModule(NetworkModule networkModule) {
- if (networkModule.canRegisterHttpExtensions()) {
- networkModule.registerHttpTransport(NETTY_HTTP_TRANSPORT_NAME, Netty3HttpServerTransport.class);
- }
- networkModule.registerTransport(NETTY_TRANSPORT_NAME, Netty3Transport.class);
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) {
+ return Collections.singletonMap(NETTY_TRANSPORT_NAME, () -> new Netty3Transport(settings, threadPool, networkService, bigArrays,
+ namedWriteableRegistry, circuitBreakerService));
}
+ @Override
+ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap(NETTY_HTTP_TRANSPORT_NAME, () -> new Netty3HttpServerTransport(settings, networkService,
+ bigArrays, threadPool));
+ }
}
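
The plugin no longer registers implementations through onModule(NetworkModule); it implements NetworkPlugin and returns named suppliers, with all dependencies passed in as method arguments instead of injected. A minimal sketch of a plugin following the same shape, registering the Netty3 transport under an illustrative key:

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Supplier;
    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.common.network.NetworkService;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.indices.breaker.CircuitBreakerService;
    import org.elasticsearch.plugins.NetworkPlugin;
    import org.elasticsearch.plugins.Plugin;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.Transport;
    import org.elasticsearch.transport.netty3.Netty3Transport;

    // Sketch of the supplier-based registration introduced here; "my-netty3"
    // is an illustrative registration key, selectable via transport.type.
    public class MyNetworkPlugin extends Plugin implements NetworkPlugin {
        @Override
        public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                                                              CircuitBreakerService circuitBreakerService,
                                                              NamedWriteableRegistry namedWriteableRegistry,
                                                              NetworkService networkService) {
            return Collections.singletonMap("my-netty3", () -> new Netty3Transport(settings, threadPool, networkService,
                bigArrays, namedWriteableRegistry, circuitBreakerService));
        }
    }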
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java
index 6ff941c48e..03c9671ad7 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java
@@ -19,8 +19,8 @@
package org.elasticsearch.transport.netty3;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.SuppressLoggerChecks;
-import org.elasticsearch.common.logging.ESLogger;
import org.jboss.netty.logging.AbstractInternalLogger;
/**
@@ -29,9 +29,9 @@ import org.jboss.netty.logging.AbstractInternalLogger;
@SuppressLoggerChecks(reason = "safely delegates to logger")
final class Netty3InternalESLogger extends AbstractInternalLogger {
- private final ESLogger logger;
+ private final Logger logger;
- Netty3InternalESLogger(ESLogger logger) {
+ Netty3InternalESLogger(Logger logger) {
this.logger = logger;
}
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java
index 6a7732723c..bbfb775d0e 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3OpenChannelsHandler.java
@@ -19,8 +19,8 @@
package org.elasticsearch.transport.netty3;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.jboss.netty.channel.Channel;
@@ -42,9 +42,9 @@ public class Netty3OpenChannelsHandler implements ChannelUpstreamHandler, Releas
final CounterMetric openChannelsMetric = new CounterMetric();
final CounterMetric totalChannelsMetric = new CounterMetric();
- final ESLogger logger;
+ final Logger logger;
- public Netty3OpenChannelsHandler(ESLogger logger) {
+ public Netty3OpenChannelsHandler(Logger logger) {
this.logger = logger;
}
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java
index f5f32044eb..8d1a6edd78 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java
@@ -19,13 +19,13 @@
package org.elasticsearch.transport.netty3;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkService.TcpSettings;
@@ -46,7 +46,6 @@ import org.elasticsearch.transport.TransportServiceAdapter;
import org.elasticsearch.transport.TransportSettings;
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.bootstrap.ServerBootstrap;
-import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFuture;
@@ -111,9 +110,9 @@ public class Netty3Transport extends TcpTransport<Channel> {
"transport.netty.receive_predictor_size",
settings -> {
long defaultReceiverPredictor = 512 * 1024;
- if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
+ if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes() > 0) {
// we can guess a better default...
- long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / WORKER_COUNT.get(settings));
+ long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes()) / WORKER_COUNT.get(settings));
defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
}
return new ByteSizeValue(defaultReceiverPredictor).toString();
@@ -138,7 +137,6 @@ public class Netty3Transport extends TcpTransport<Channel> {
protected volatile ClientBootstrap clientBootstrap;
protected final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();
- @Inject
public Netty3Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
super("netty3", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
@@ -149,11 +147,11 @@ public class Netty3Transport extends TcpTransport<Channel> {
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for Netty's default values; we can use higher ones here, even a fixed size
this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
- if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
- receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
+ if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) {
+ receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.getBytes());
} else {
- receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(),
- (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
+ receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.getBytes(),
+ (int) receivePredictorMin.getBytes(), (int) receivePredictorMax.getBytes());
}
}
@@ -213,13 +211,13 @@ public class Netty3Transport extends TcpTransport<Channel> {
clientBootstrap.setOption("keepAlive", tcpKeepAlive);
ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings);
- if (tcpSendBufferSize.bytes() > 0) {
- clientBootstrap.setOption("sendBufferSize", tcpSendBufferSize.bytes());
+ if (tcpSendBufferSize.getBytes() > 0) {
+ clientBootstrap.setOption("sendBufferSize", tcpSendBufferSize.getBytes());
}
ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
- if (tcpReceiveBufferSize.bytes() > 0) {
- clientBootstrap.setOption("receiveBufferSize", tcpReceiveBufferSize.bytes());
+ if (tcpReceiveBufferSize.getBytes() > 0) {
+ clientBootstrap.setOption("receiveBufferSize", tcpReceiveBufferSize.getBytes());
}
clientBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
@@ -254,13 +252,13 @@ public class Netty3Transport extends TcpTransport<Channel> {
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size",
TCP_SEND_BUFFER_SIZE.get(settings));
- if (fallbackTcpSendBufferSize.bytes() >= 0) {
+ if (fallbackTcpSendBufferSize.getBytes() >= 0) {
fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
}
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size",
TCP_RECEIVE_BUFFER_SIZE.get(settings));
- if (fallbackTcpBufferSize.bytes() >= 0) {
+ if (fallbackTcpBufferSize.getBytes() >= 0) {
fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
}
@@ -286,8 +284,8 @@ public class Netty3Transport extends TcpTransport<Channel> {
receivePredictorMax);
}
- final ThreadFactory bossFactory = daemonThreadFactory(this.settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, name);
- final ThreadFactory workerFactory = daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, name);
+ final ThreadFactory bossFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_BOSS_THREAD_NAME_PREFIX, name);
+ final ThreadFactory workerFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, name);
final ServerBootstrap serverBootstrap;
if (blockingServer) {
serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
@@ -307,11 +305,11 @@ public class Netty3Transport extends TcpTransport<Channel> {
if (!"default".equals(tcpKeepAlive)) {
serverBootstrap.setOption("child.keepAlive", Booleans.parseBoolean(tcpKeepAlive, null));
}
- if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
- serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
+ if (tcpSendBufferSize != null && tcpSendBufferSize.getBytes() > 0) {
+ serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes());
}
- if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
- serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
+ if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.getBytes() > 0) {
+ serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes());
}
serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
@@ -419,11 +417,11 @@ public class Netty3Transport extends TcpTransport<Channel> {
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline channelPipeline = Channels.pipeline();
Netty3SizeHeaderFrameDecoder sizeHeader = new Netty3SizeHeaderFrameDecoder();
- if (nettyTransport.maxCumulationBufferCapacity.bytes() >= 0) {
- if (nettyTransport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
+ if (nettyTransport.maxCumulationBufferCapacity.getBytes() >= 0) {
+ if (nettyTransport.maxCumulationBufferCapacity.getBytes() > Integer.MAX_VALUE) {
sizeHeader.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
} else {
- sizeHeader.setMaxCumulationBufferCapacity((int) nettyTransport.maxCumulationBufferCapacity.bytes());
+ sizeHeader.setMaxCumulationBufferCapacity((int) nettyTransport.maxCumulationBufferCapacity.getBytes());
}
}
if (nettyTransport.maxCompositeBufferComponents != -1) {
@@ -457,11 +455,11 @@ public class Netty3Transport extends TcpTransport<Channel> {
ChannelPipeline channelPipeline = Channels.pipeline();
channelPipeline.addLast("openChannels", nettyTransport.serverOpenChannels);
Netty3SizeHeaderFrameDecoder sizeHeader = new Netty3SizeHeaderFrameDecoder();
- if (nettyTransport.maxCumulationBufferCapacity.bytes() > 0) {
- if (nettyTransport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
+ if (nettyTransport.maxCumulationBufferCapacity.getBytes() > 0) {
+ if (nettyTransport.maxCumulationBufferCapacity.getBytes() > Integer.MAX_VALUE) {
sizeHeader.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
} else {
- sizeHeader.setMaxCumulationBufferCapacity((int) nettyTransport.maxCumulationBufferCapacity.bytes());
+ sizeHeader.setMaxCumulationBufferCapacity((int) nettyTransport.maxCumulationBufferCapacity.getBytes());
}
}
if (nettyTransport.maxCompositeBufferComponents != -1) {
@@ -554,7 +552,7 @@ public class Netty3Transport extends TcpTransport<Channel> {
try {
serverBootstrap.releaseExternalResources();
} catch (Exception e) {
- logger.debug("Error closing serverBootstrap for profile [{}]", e, name);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("Error closing serverBootstrap for profile [{}]", name), e);
}
}
serverBootstraps.clear();
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java
index 17a367735d..2cbf92997b 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.transport.netty3;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.SuppressForbidden;
@@ -121,7 +123,9 @@ public class Netty3Utils {
}
});
} catch (final SecurityException e) {
- Loggers.getLogger(Netty3Utils.class).debug("Unable to get/set System Property: {}", e, key);
+ Loggers
+ .getLogger(Netty3Utils.class)
+ .debug((Supplier<?>) () -> new ParameterizedMessage("Unable to get/set System Property: {}", key), e);
}
}
}
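
All of the logging changes in these files follow one Log4j 2 idiom: the throwable moves from the middle of the argument list to the trailing parameter, and the message becomes a Supplier of a ParameterizedMessage, so the formatted message is only built when the level is enabled. A self-contained sketch (the profile name is illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    // Sketch of the lazy-logging idiom used throughout this diff. The cast to
    // Supplier<?> selects the (Supplier, Throwable) overload, so the
    // ParameterizedMessage is only constructed when DEBUG is enabled.
    public class LazyLoggingExample {
        private static final Logger logger = LogManager.getLogger(LazyLoggingExample.class);

        public static void main(String[] args) {
            Exception cause = new Exception("simulated failure");
            logger.debug(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "Error closing serverBootstrap for profile [{}]", "default"),
                cause);
        }
    }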
diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java
index a7d9805cf3..ba72ade58e 100644
--- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java
+++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java
@@ -67,7 +67,8 @@ public class Netty3SizeHeaderFrameDecoderTests extends ESTestCase {
nettyTransport = new Netty3Transport(settings, threadPool, networkService, bigArrays,
new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService());
nettyTransport.start();
- TransportService transportService = new TransportService(settings, nettyTransport, threadPool);
+ TransportService transportService = new TransportService(settings, nettyTransport, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
nettyTransport.transportServiceAdapter(transportService.createAdapter());
TransportAddress[] boundAddresses = nettyTransport.boundAddress().boundAddresses();
diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java
index bb169aa8d7..7c44fc4d4e 100644
--- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java
+++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java
@@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseOptions;
+import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException;
@@ -65,13 +66,13 @@ public class Netty3ScheduledPingTests extends ESTestCase {
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList());
final Netty3Transport nettyA = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
- MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
+ MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
serviceA.start();
serviceA.acceptIncomingRequests();
final Netty3Transport nettyB = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
- MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool);
+ MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
serviceB.start();
serviceB.acceptIncomingRequests();
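
Both test changes above reflect the same signature change: TransportService, and the MockTransportService used in tests, now take an explicit TransportInterceptor, with NOOP_TRANSPORT_INTERCEPTOR preserving the old pass-through behavior. A small sketch of the updated wiring, assuming an already-constructed Transport:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.Transport;
    import org.elasticsearch.transport.TransportService;

    // Sketch of the new wiring: the interceptor argument is now mandatory, and
    // the no-op constant keeps existing tests behaviorally unchanged.
    final class TransportServiceWiring {
        static TransportService wire(Settings settings, Transport transport, ThreadPool threadPool) {
            TransportService transportService = new TransportService(settings, transport, threadPool,
                TransportService.NOOP_TRANSPORT_INTERCEPTOR);
            transportService.start();
            transportService.acceptIncomingRequests();
            return transportService;
        }
    }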
diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportIT.java
index f2863c5b5d..9e670da635 100644
--- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportIT.java
+++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportIT.java
@@ -24,7 +24,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.network.NetworkModule;
@@ -32,10 +31,12 @@ import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportSettings;
import org.jboss.netty.channel.Channel;
@@ -45,6 +46,8 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is;
@@ -83,13 +86,19 @@ public class Netty3TransportIT extends ESNetty3IntegTestCase {
public static final class ExceptionThrowingNetty3Transport extends Netty3Transport {
- public static class TestPlugin extends Plugin {
- public void onModule(NetworkModule module) {
- module.registerTransport("exception-throwing", ExceptionThrowingNetty3Transport.class);
+ public static class TestPlugin extends Plugin implements NetworkPlugin {
+
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap("exception-throwing", () ->
+ new ExceptionThrowingNetty3Transport(settings, threadPool, networkService, bigArrays,
+ namedWriteableRegistry, circuitBreakerService));
}
}
- @Inject
public ExceptionThrowingNetty3Transport(
Settings settings,
ThreadPool threadPool,
diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java
index 5b862908ea..b90b788f90 100644
--- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java
+++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java
@@ -32,6 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import java.net.InetAddress;
@@ -55,7 +56,7 @@ public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase
return version;
}
};
- return new MockTransportService(Settings.EMPTY, transport, threadPool);
+ return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
}
@Override
diff --git a/modules/transport-netty3/src/test/resources/rest-api-spec/test/10_basic.yaml b/modules/transport-netty3/src/test/resources/rest-api-spec/test/10_basic.yaml
index e25074fb90..b2267343ec 100644
--- a/modules/transport-netty3/src/test/resources/rest-api-spec/test/10_basic.yaml
+++ b/modules/transport-netty3/src/test/resources/rest-api-spec/test/10_basic.yaml
@@ -11,3 +11,9 @@
nodes.info: {}
- match: { nodes.$master.modules.0.name: transport-netty3 }
+
+ - do:
+ cluster.stats: {}
+
+ - match: { nodes.network_types.transport_types.netty3: 2 }
+ - match: { nodes.network_types.http_types.netty3: 2 }
diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index c87ddabffb..4fc38bc394 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -33,13 +33,13 @@ compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-tr
dependencies {
// network stack
- compile "io.netty:netty-buffer:4.1.4.Final"
- compile "io.netty:netty-codec:4.1.4.Final"
- compile "io.netty:netty-codec-http:4.1.4.Final"
- compile "io.netty:netty-common:4.1.4.Final"
- compile "io.netty:netty-handler:4.1.4.Final"
- compile "io.netty:netty-resolver:4.1.4.Final"
- compile "io.netty:netty-transport:4.1.4.Final"
+ compile "io.netty:netty-buffer:4.1.5.Final"
+ compile "io.netty:netty-codec:4.1.5.Final"
+ compile "io.netty:netty-codec-http:4.1.5.Final"
+ compile "io.netty:netty-common:4.1.5.Final"
+ compile "io.netty:netty-handler:4.1.5.Final"
+ compile "io.netty:netty-resolver:4.1.5.Final"
+ compile "io.netty:netty-transport:4.1.5.Final"
}
integTest {
@@ -114,9 +114,9 @@ thirdPartyAudit.excludes = [
'com.ning.compress.lzf.LZFEncoder',
'com.ning.compress.lzf.util.ChunkDecoderFactory',
'com.ning.compress.lzf.util.ChunkEncoderFactory',
- 'javassist/ClassClassPath',
- 'javassist/ClassPath',
- 'javassist/ClassPool',
+ 'javassist.ClassClassPath',
+ 'javassist.ClassPath',
+ 'javassist.ClassPool',
'javassist.CtClass',
'javassist.CtMethod',
'lzma.sdk.lzma.Encoder',
@@ -125,9 +125,9 @@ thirdPartyAudit.excludes = [
'net.jpountz.lz4.LZ4FastDecompressor',
'net.jpountz.xxhash.StreamingXXHash32',
'net.jpountz.xxhash.XXHashFactory',
- 'org.apache.logging.log4j.LogManager',
- 'org.apache.logging.log4j.Logger',
+ 'org.apache.tomcat.Apr',
'org.apache.tomcat.jni.CertificateRequestedCallback',
+ 'org.apache.tomcat.jni.CertificateRequestedCallback$KeyMaterial',
'org.apache.tomcat.jni.CertificateVerifier',
'org.apache.tomcat.jni.SessionTicketKey',
'org.eclipse.jetty.alpn.ALPN$ClientProvider',
@@ -136,6 +136,8 @@ thirdPartyAudit.excludes = [
'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
'io.netty.util.internal.PlatformDependent0',
+ 'io.netty.util.internal.PlatformDependent0$2',
+ 'io.netty.util.internal.PlatformDependent0$3',
'io.netty.util.internal.UnsafeAtomicIntegerFieldUpdater',
'io.netty.util.internal.UnsafeAtomicLongFieldUpdater',
'io.netty.util.internal.UnsafeAtomicReferenceFieldUpdater',
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.4.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.4.Final.jar.sha1
deleted file mode 100644
index 36ae08b1c8..0000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.4.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e199381c808377faeeb80e69f365246004d6f9f \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.5.Final.jar.sha1
new file mode 100644
index 0000000000..e64426f033
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.5.Final.jar.sha1
@@ -0,0 +1 @@
+b5fb6bccda4d63d4a74c9faccdf32f77ab66abc1 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.4.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.4.Final.jar.sha1
deleted file mode 100644
index 83a0990f51..0000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.4.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-86fc9367492cdca25542fb037467ab853a77ff62 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.5.Final.jar.sha1
new file mode 100644
index 0000000000..66484a9304
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-4.1.5.Final.jar.sha1
@@ -0,0 +1 @@
+66bbf9324fa36467d041083f89328e2a24ec4f67 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.4.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.4.Final.jar.sha1
deleted file mode 100644
index 292dd9261e..0000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.4.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f6ef27ed46dd806dc27c1f0ed2e6bcfad12d28cc \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.5.Final.jar.sha1
new file mode 100644
index 0000000000..93a445416a
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.5.Final.jar.sha1
@@ -0,0 +1 @@
+087bda1b9ec7e3f75ca721fc87735cbedad2aa1a \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.4.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.4.Final.jar.sha1
deleted file mode 100644
index c069aa3c2b..0000000000
--- a/modules/transport-netty4/licenses/netty-common-4.1.4.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e267ca463a0dc6292cf5e0528c4b59d6d5f76ff5 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.5.Final.jar.sha1
new file mode 100644
index 0000000000..95da96aaf8
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-common-4.1.5.Final.jar.sha1
@@ -0,0 +1 @@
+607f8433d8782445e72abe34e43a7e57e86a5e6c \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.4.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.4.Final.jar.sha1
deleted file mode 100644
index f671aa442a..0000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.4.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5674b3bc1203d913da71ba98bb7d43832c7a71e7 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.5.Final.jar.sha1
new file mode 100644
index 0000000000..7aadd85058
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.5.Final.jar.sha1
@@ -0,0 +1 @@
+6262900ee9487e62560030a136160df953b1cd6b \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.4.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.4.Final.jar.sha1
deleted file mode 100644
index d63592e7d2..0000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.4.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-19f86688069a018cf5e45f7a9f9a02971b9e31ee \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.5.Final.jar.sha1
new file mode 100644
index 0000000000..060655014a
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.5.Final.jar.sha1
@@ -0,0 +1 @@
+5f367bedcdc185a727fda3296b9a18014cdc22c4 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.4.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.4.Final.jar.sha1
deleted file mode 100644
index da6299b130..0000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.4.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6c99d2323d75d74b9571c4ddbdc411d96bf8d780 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.5.Final.jar.sha1
new file mode 100644
index 0000000000..0e7dc27daa
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.5.Final.jar.sha1
@@ -0,0 +1 @@
+37126b370722ff9631ee13c91139aacec0a71d1d \ No newline at end of file
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
index 68727db27c..cd0a208d2e 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
@@ -43,9 +43,10 @@ import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.timeout.ReadTimeoutException;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
@@ -149,9 +150,9 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
Setting.byteSizeSetting("transport.netty.receive_predictor_size",
settings -> {
long defaultReceiverPredictor = 512 * 1024;
- if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
+ if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes() > 0) {
// we can guess a better default...
- long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / SETTING_HTTP_WORKER_COUNT.get
+ long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes()) / SETTING_HTTP_WORKER_COUNT.get
(settings));
defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
}
@@ -218,7 +219,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
private final Netty4CorsConfig corsConfig;
- @Inject
public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool) {
super(settings);
this.networkService = networkService;
@@ -247,13 +247,13 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for Netty's default values; we can use higher ones here, even a fixed size
ByteSizeValue receivePredictorMin = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
ByteSizeValue receivePredictorMax = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
- if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
- recvByteBufAllocator = new FixedRecvByteBufAllocator(Math.toIntExact(receivePredictorMax.bytes()));
+ if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) {
+ recvByteBufAllocator = new FixedRecvByteBufAllocator(Math.toIntExact(receivePredictorMax.getBytes()));
} else {
recvByteBufAllocator = new AdaptiveRecvByteBufAllocator(
- Math.toIntExact(receivePredictorMin.bytes()),
- Math.toIntExact(receivePredictorMin.bytes()),
- Math.toIntExact(receivePredictorMax.bytes()));
+ Math.toIntExact(receivePredictorMin.getBytes()),
+ Math.toIntExact(receivePredictorMin.getBytes()),
+ Math.toIntExact(receivePredictorMax.getBytes()));
}
this.compression = SETTING_HTTP_COMPRESSION.get(settings);
@@ -263,7 +263,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
this.corsConfig = buildCorsConfig(settings);
// validate max content length
- if (maxContentLength.bytes() > Integer.MAX_VALUE) {
+ if (maxContentLength.getBytes() > Integer.MAX_VALUE) {
logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength);
maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
}
@@ -290,10 +290,10 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
serverBootstrap = new ServerBootstrap();
if (blockingServer) {
- serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings, "http_server_worker")));
+ serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
serverBootstrap.channel(OioServerSocketChannel.class);
} else {
- serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, "http_server_worker")));
+ serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
serverBootstrap.channel(NioServerSocketChannel.class);
}
@@ -303,13 +303,13 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));
final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
- if (tcpSendBufferSize.bytes() > 0) {
- serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.bytes()));
+ if (tcpSendBufferSize.getBytes() > 0) {
+ serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
}
final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
- if (tcpReceiveBufferSize.bytes() > 0) {
- serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.bytes()));
+ if (tcpReceiveBufferSize.getBytes() > 0) {
+ serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
}
serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
@@ -483,7 +483,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
if (boundTransportAddress == null) {
return null;
}
- return new HttpInfo(boundTransportAddress, maxContentLength.bytes());
+ return new HttpInfo(boundTransportAddress, maxContentLength.getBytes());
}
@Override
@@ -512,10 +512,16 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
return;
}
if (!NetworkExceptionHelper.isCloseConnectionException(cause)) {
- logger.warn("caught exception while handling client http traffic, closing connection {}", cause, ctx.channel());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "caught exception while handling client http traffic, closing connection {}", ctx.channel()),
+ cause);
ctx.channel().close();
} else {
- logger.debug("caught exception while handling client http traffic, closing connection {}", cause, ctx.channel());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "caught exception while handling client http traffic, closing connection {}", ctx.channel()),
+ cause);
ctx.channel().close();
}
}
@@ -542,14 +548,14 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
protected void initChannel(Channel ch) throws Exception {
ch.pipeline().addLast("openChannels", transport.serverOpenChannels);
final HttpRequestDecoder decoder = new HttpRequestDecoder(
- Math.toIntExact(transport.maxInitialLineLength.bytes()),
- Math.toIntExact(transport.maxHeaderSize.bytes()),
- Math.toIntExact(transport.maxChunkSize.bytes()));
+ Math.toIntExact(transport.maxInitialLineLength.getBytes()),
+ Math.toIntExact(transport.maxHeaderSize.getBytes()),
+ Math.toIntExact(transport.maxChunkSize.getBytes()));
decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR);
ch.pipeline().addLast("decoder", decoder);
ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor());
ch.pipeline().addLast("encoder", new HttpResponseEncoder());
- final HttpObjectAggregator aggregator = new HttpObjectAggregator(Math.toIntExact(transport.maxContentLength.bytes()));
+ final HttpObjectAggregator aggregator = new HttpObjectAggregator(Math.toIntExact(transport.maxContentLength.getBytes()));
if (transport.maxCompositeBufferComponents != -1) {
aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
}
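
One nuance on the netty4 HTTP side: where the netty3 code narrows byte sizes with a plain (int) cast, this file uses Math.toIntExact, which throws on overflow instead of silently truncating. A short standalone illustration:

    // Shows why Math.toIntExact is the safer narrowing: a raw (int) cast wraps
    // silently, while toIntExact throws ArithmeticException on overflow.
    public class NarrowingExample {
        public static void main(String[] args) {
            long oversized = 4L * 1024 * 1024 * 1024; // 4gb, beyond Integer.MAX_VALUE
            System.out.println((int) oversized);      // prints 0: silent truncation
            try {
                Math.toIntExact(oversized);
            } catch (ArithmeticException e) {
                System.out.println("rejected: " + e.getMessage()); // "integer overflow"
            }
        }
    }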
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java
index 8f449b95ec..08f87a2779 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java
@@ -19,17 +19,27 @@
package org.elasticsearch.transport;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.netty4.Netty4HttpServerTransport;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.netty4.Netty4Transport;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
-public class Netty4Plugin extends Plugin {
+public class Netty4Plugin extends Plugin implements NetworkPlugin {
public static final String NETTY_TRANSPORT_NAME = "netty4";
public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4";
@@ -66,11 +76,21 @@ public class Netty4Plugin extends Plugin {
.build();
}
- public void onModule(NetworkModule networkModule) {
- if (networkModule.canRegisterHttpExtensions()) {
- networkModule.registerHttpTransport(NETTY_HTTP_TRANSPORT_NAME, Netty4HttpServerTransport.class);
- }
- networkModule.registerTransport(NETTY_TRANSPORT_NAME, Netty4Transport.class);
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap(NETTY_TRANSPORT_NAME, () -> new Netty4Transport(settings, threadPool, networkService, bigArrays,
+ namedWriteableRegistry, circuitBreakerService));
}
+ @Override
+ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap(NETTY_HTTP_TRANSPORT_NAME,
+ () -> new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool));
+ }
}
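
Netty4Plugin mirrors the Netty3Plugin change above; the HTTP side uses the parallel getHttpTransports extension point. A minimal sketch registering the Netty4 HTTP transport under an illustrative key:

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Supplier;
    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.common.network.NetworkService;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.http.HttpServerTransport;
    import org.elasticsearch.http.netty4.Netty4HttpServerTransport;
    import org.elasticsearch.indices.breaker.CircuitBreakerService;
    import org.elasticsearch.plugins.NetworkPlugin;
    import org.elasticsearch.plugins.Plugin;
    import org.elasticsearch.threadpool.ThreadPool;

    // Sketch of the HTTP-side extension point; "my-netty4-http" is an
    // illustrative key, and the constructor call mirrors the one in this diff.
    public class MyHttpPlugin extends Plugin implements NetworkPlugin {
        @Override
        public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool,
                                                                            BigArrays bigArrays,
                                                                            CircuitBreakerService circuitBreakerService,
                                                                            NamedWriteableRegistry namedWriteableRegistry,
                                                                            NetworkService networkService) {
            return Collections.singletonMap("my-netty4-http",
                () -> new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool));
        }
    }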
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java
index 6155529401..aaa277e34b 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java
@@ -20,14 +20,14 @@
package org.elasticsearch.transport.netty4;
import io.netty.util.internal.logging.AbstractInternalLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.SuppressLoggerChecks;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@SuppressLoggerChecks(reason = "safely delegates to logger")
class Netty4InternalESLogger extends AbstractInternalLogger {
- private final ESLogger logger;
+ private final Logger logger;
Netty4InternalESLogger(final String name) {
super(name);
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java
index 0562a0d466..2270c90967 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4OpenChannelsHandler.java
@@ -25,16 +25,14 @@ import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.metrics.CounterMetric;
import java.io.IOException;
import java.util.Collections;
-import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
@ChannelHandler.Sharable
public class Netty4OpenChannelsHandler extends ChannelInboundHandlerAdapter implements Releasable {
@@ -43,9 +41,9 @@ public class Netty4OpenChannelsHandler extends ChannelInboundHandlerAdapter impl
final CounterMetric openChannelsMetric = new CounterMetric();
final CounterMetric totalChannelsMetric = new CounterMetric();
- final ESLogger logger;
+ final Logger logger;
- public Netty4OpenChannelsHandler(ESLogger logger) {
+ public Netty4OpenChannelsHandler(Logger logger) {
this.logger = logger;
}
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
index d7631acd6b..808592c58f 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
@@ -33,12 +33,13 @@ import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.oio.OioEventLoopGroup;
-import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.channel.socket.oio.OioServerSocketChannel;
import io.netty.channel.socket.oio.OioSocketChannel;
import io.netty.util.concurrent.Future;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -112,9 +113,9 @@ public class Netty4Transport extends TcpTransport<Channel> {
"transport.netty.receive_predictor_size",
settings -> {
long defaultReceiverPredictor = 512 * 1024;
- if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
+ if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes() > 0) {
// we can guess a better default...
- long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / WORKER_COUNT.get(settings));
+ long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes()) / WORKER_COUNT.get(settings));
defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
}
return new ByteSizeValue(defaultReceiverPredictor).toString();
@@ -151,11 +152,11 @@ public class Netty4Transport extends TcpTransport<Channel> {
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for Netty's default values; we can use higher ones here, even a fixed size
this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
- if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
- recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.bytes());
+ if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) {
+ recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.getBytes());
} else {
- recvByteBufAllocator = new AdaptiveRecvByteBufAllocator((int) receivePredictorMin.bytes(),
- (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
+ recvByteBufAllocator = new AdaptiveRecvByteBufAllocator((int) receivePredictorMin.getBytes(),
+ (int) receivePredictorMin.getBytes(), (int) receivePredictorMax.getBytes());
}
}
@@ -207,13 +208,13 @@ public class Netty4Transport extends TcpTransport<Channel> {
bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));
final ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings);
- if (tcpSendBufferSize.bytes() > 0) {
- bootstrap.option(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.bytes()));
+ if (tcpSendBufferSize.getBytes() > 0) {
+ bootstrap.option(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
}
final ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
- if (tcpReceiveBufferSize.bytes() > 0) {
- bootstrap.option(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.bytes()));
+ if (tcpReceiveBufferSize.getBytes() > 0) {
+ bootstrap.option(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
}
bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
@@ -250,13 +251,13 @@ public class Netty4Transport extends TcpTransport<Channel> {
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size",
TCP_SEND_BUFFER_SIZE.get(settings));
- if (fallbackTcpSendBufferSize.bytes() >= 0) {
+ if (fallbackTcpSendBufferSize.getBytes() >= 0) {
fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
}
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size",
TCP_RECEIVE_BUFFER_SIZE.get(settings));
- if (fallbackTcpBufferSize.bytes() >= 0) {
+ if (fallbackTcpBufferSize.getBytes() >= 0) {
fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
}
@@ -272,7 +273,7 @@ public class Netty4Transport extends TcpTransport<Channel> {
connectionsPerNodePing, receivePredictorMin, receivePredictorMax);
}
- final ThreadFactory workerFactory = daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, name);
+ final ThreadFactory workerFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, name);
final ServerBootstrap serverBootstrap = new ServerBootstrap();
@@ -290,12 +291,12 @@ public class Netty4Transport extends TcpTransport<Channel> {
serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));
final ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings);
- if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
- serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.bytes()));
+ if (tcpSendBufferSize != null && tcpSendBufferSize.getBytes() > 0) {
+ serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
}
final ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings);
- if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
+ if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.getBytes() > 0) {
serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.bytesAsInt()));
}
@@ -495,7 +496,9 @@ public class Netty4Transport extends TcpTransport<Channel> {
for (final Tuple<String, Future<?>> future : serverBootstrapCloseFutures) {
future.v2().awaitUninterruptibly();
if (!future.v2().isSuccess()) {
- logger.debug("Error closing server bootstrap for profile [{}]", future.v2().cause(), future.v1());
+ logger.debug(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause());
}
}
serverBootstraps.clear();
diff --git a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy
index a6fb99d1f6..a8cd1a7fff 100644
--- a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy
+++ b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy
@@ -17,7 +17,7 @@
* under the License.
*/
-grant codeBase "${codebase.netty-transport-4.1.4.Final.jar}" {
+grant codeBase "${codebase.netty-transport-4.1.5.Final.jar}" {
// Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854
// the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely!
permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write";
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java
index 155bbe4bb5..63e35a786c 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java
@@ -25,14 +25,12 @@ import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
-import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;
-import io.netty.handler.codec.http.QueryStringDecoder;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@@ -51,10 +49,13 @@ import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.hasSize;
@@ -95,7 +96,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
final List<String> requests = new ArrayList<>(numberOfRequests);
for (int i = 0; i < numberOfRequests; i++) {
if (rarely()) {
- requests.add("/slow?sleep=" + scaledRandomIntBetween(500, 1000));
+ requests.add("/slow/" + i);
} else {
requests.add("/" + i);
}
@@ -120,32 +121,41 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
(InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
final int numberOfRequests = randomIntBetween(4, 16);
- final int numberOfSlowRequests = scaledRandomIntBetween(1, numberOfRequests);
+ final Set<Integer> slowIds = new HashSet<>();
final List<String> requests = new ArrayList<>(numberOfRequests);
- for (int i = 0; i < numberOfRequests - numberOfSlowRequests; i++) {
- requests.add("/" + i);
- }
- for (int i = 0; i < numberOfSlowRequests; i++) {
- requests.add("/slow?sleep=" + sleep(i));
+ int numberOfSlowRequests = 0;
+ for (int i = 0; i < numberOfRequests; i++) {
+ if (rarely()) {
+ requests.add("/slow/" + i);
+ slowIds.add(i);
+ numberOfSlowRequests++;
+ } else {
+ requests.add("/" + i);
+ }
}
try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{}));
List<String> responseBodies = new ArrayList<>(Netty4HttpClient.returnHttpResponseBodies(responses));
- // we cannot be sure about the order of the fast requests, but the slow ones should have to be last
+ // we cannot be sure about the order of the responses, but the slow ones should
+ // come last
assertThat(responseBodies, hasSize(numberOfRequests));
+ for (int i = 0; i < numberOfRequests - numberOfSlowRequests; i++) {
+ assertThat(responseBodies.get(i), matches("/\\d+"));
+ }
+
+ final Set<Integer> ids = new HashSet<>();
for (int i = 0; i < numberOfSlowRequests; i++) {
- assertThat(responseBodies.get(numberOfRequests - numberOfSlowRequests + i), equalTo("/slow?sleep=" + sleep(i)));
+ final String response = responseBodies.get(numberOfRequests - numberOfSlowRequests + i);
+ assertThat(response, matches("/slow/\\d+"));
+ assertTrue(ids.add(Integer.parseInt(response.split("/")[2])));
}
+
+ assertThat(slowIds, equalTo(ids));
}
}
}
-
- private int sleep(int index) {
- return 500 + 100 * (index + 1);
- }
-
class CustomNettyHttpServerTransport extends Netty4HttpServerTransport {
private final ExecutorService executorService = Executors.newCachedThreadPool();
@@ -237,17 +247,15 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, buffer);
httpResponse.headers().add(HttpHeaderNames.CONTENT_LENGTH, buffer.readableBytes());
- final QueryStringDecoder decoder = new QueryStringDecoder(uri);
-
- final int timeout =
- uri.startsWith("/slow") && decoder.parameters().containsKey("sleep") ?
- Integer.valueOf(decoder.parameters().get("sleep").get(0)) : 0;
- if (timeout > 0) {
+ final boolean slow = uri.matches("/slow/\\d+");
+ if (slow) {
try {
- Thread.sleep(timeout);
+ Thread.sleep(scaledRandomIntBetween(500, 1000));
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
+ } else {
+ assert uri.matches("/\\d+");
}
if (pipelinedRequest != null) {
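
The rewritten test above no longer encodes a sleep in the query string: slow requests are marked by path (/slow/<id>), the server sleeps a random interval for them, and the assertions check that every fast response precedes every slow one and that the slow ids round-trip exactly. A distilled, plain-JDK sketch of that ordering check (sample data and the class name are illustrative; run with -ea to enable the asserts):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PipelineOrderingSketch {
    public static void main(String[] args) {
        // Responses as the test expects them: fast bodies first, slow bodies last.
        List<String> responseBodies = Arrays.asList("/0", "/2", "/slow/1", "/slow/3");
        Set<Integer> slowIds = new HashSet<>(Arrays.asList(1, 3));
        int firstSlow = responseBodies.size() - slowIds.size();

        Set<Integer> ids = new HashSet<>();
        for (int i = 0; i < responseBodies.size(); i++) {
            String body = responseBodies.get(i);
            if (i < firstSlow) {
                assert body.matches("/\\d+") : body;           // fast responses, any order
            } else {
                assert body.matches("/slow/\\d+") : body;      // slow responses at the end
                ids.add(Integer.parseInt(body.split("/")[2])); // "/slow/3" -> ["", "slow", "3"]
            }
        }
        assert ids.equals(slowIds);
        System.out.println("ordering checks passed");
    }
}
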
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java
index 03f7e5fdab..0b8d5fb6a3 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java
@@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportResponseOptions;
+import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException;
@@ -65,13 +66,13 @@ public class Netty4ScheduledPingTests extends ESTestCase {
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList());
final Netty4Transport nettyA = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
- MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
+ MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
serviceA.start();
serviceA.acceptIncomingRequests();
final Netty4Transport nettyB = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
- MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool);
+ MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
serviceB.start();
serviceB.acceptIncomingRequests();
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java
index b913fad933..fe6498b08c 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java
@@ -25,7 +25,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.network.NetworkModule;
@@ -33,10 +32,12 @@ import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException;
@@ -45,6 +46,8 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is;
@@ -83,13 +86,19 @@ public class Netty4TransportIT extends ESNetty4IntegTestCase {
public static final class ExceptionThrowingNetty4Transport extends Netty4Transport {
- public static class TestPlugin extends Plugin {
- public void onModule(NetworkModule module) {
- module.registerTransport("exception-throwing", ExceptionThrowingNetty4Transport.class);
+ public static class TestPlugin extends Plugin implements NetworkPlugin {
+
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap("exception-throwing",
+ () -> new ExceptionThrowingNetty4Transport(settings, threadPool, networkService, bigArrays,
+ namedWriteableRegistry, circuitBreakerService));
}
}
- @Inject
public ExceptionThrowingNetty4Transport(
Settings settings,
ThreadPool threadPool,
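
The hunk above replaces the removed onModule(NetworkModule) registration with the NetworkPlugin#getTransports extension point, which returns transport factories keyed by name. A sketch of the same wiring for a plugin that exposes the stock Netty4Transport under a custom key (the plugin class name and the key are illustrative):

import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.netty4.Netty4Transport;

public class MyTransportPlugin extends Plugin implements NetworkPlugin {

    @Override
    public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                                                          CircuitBreakerService circuitBreakerService,
                                                          NamedWriteableRegistry namedWriteableRegistry,
                                                          NetworkService networkService) {
        // The map key is the value users select via the transport type setting.
        return Collections.singletonMap("my-netty4",
            () -> new Netty4Transport(settings, threadPool, networkService, bigArrays,
                namedWriteableRegistry, circuitBreakerService));
    }
}
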
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
index 8902d6c109..3a3a4587ca 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
@@ -32,6 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import java.net.InetAddress;
@@ -55,7 +56,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
return version;
}
};
- return new MockTransportService(Settings.EMPTY, transport, threadPool);
+ return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
}
@Override
diff --git a/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yaml b/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yaml
index a7beac1491..ddf956e2c1 100644
--- a/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yaml
+++ b/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yaml
@@ -11,3 +11,9 @@
nodes.info: {}
- match: { nodes.$master.modules.1.name: transport-netty4 }
+
+ - do:
+ cluster.stats: {}
+
+ - match: { nodes.network_types.transport_types.netty4: 2 }
+ - match: { nodes.network_types.http_types.netty4: 2 }
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.1.0.jar.sha1
deleted file mode 100644
index 96fef36cf4..0000000000
--- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f580ed2ea0dca073199daa1a190ac142b3426030
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1
new file mode 100644
index 0000000000..2a734f79a3
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1
@@ -0,0 +1 @@
+68de5f298090b92aa9a803eb4f5aed0c9104e685
\ No newline at end of file
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java
index eac3ceebc1..14fa5922c1 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java
@@ -85,8 +85,8 @@ public class IcuTokenizerFactory extends AbstractTokenizerFactory {
String resourcePath = entry.getValue();
breakers[code] = parseRules(resourcePath, env);
}
- // cjkAsWords is not configurable yet.
- ICUTokenizerConfig config = new DefaultICUTokenizerConfig(true) {
+ // neither cjkAsWords nor myanmarAsWords is configurable yet.
+ ICUTokenizerConfig config = new DefaultICUTokenizerConfig(true, true) {
@Override
public BreakIterator getBreakIterator(int script) {
if (breakers[script] != null) {
diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java
index 180c426861..716d07385b 100644
--- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java
+++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java
@@ -42,9 +42,9 @@ import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStre
public class IcuTokenizerFactoryTests extends ESTestCase {
public void testSimpleIcuTokenizer() throws IOException {
- AnalysisService analysisService = createAnalysisService();
+ TestAnalysis analysis = createTestAnalysis();
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("icu_tokenizer");
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("icu_tokenizer");
ICUTokenizer tokenizer = (ICUTokenizer) tokenizerFactory.create();
Reader reader = new StringReader("向日葵, one-two");
@@ -53,10 +53,10 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
}
public void testIcuCustomizeRuleFile() throws IOException {
- AnalysisService analysisService = createAnalysisService();
+ TestAnalysis analysis = createTestAnalysis();
// test the tokenizer with single rule file
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("user_rule_tokenizer");
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("user_rule_tokenizer");
ICUTokenizer tokenizer = (ICUTokenizer) tokenizerFactory.create();
Reader reader = new StringReader
("One-two punch. Brang-, not brung-it. This one--not that one--is the right one, -ish.");
@@ -68,10 +68,10 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
}
public void testMultipleIcuCustomizeRuleFiles() throws IOException {
- AnalysisService analysisService = createAnalysisService();
+ TestAnalysis analysis = createTestAnalysis();
// test the tokenizer with two rule files
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("multi_rule_tokenizer");
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("multi_rule_tokenizer");
ICUTokenizer tokenizer = (ICUTokenizer) tokenizerFactory.create();
StringReader reader = new StringReader
("Some English. Немного русский. ข้อความภาษาไทยเล็ก ๆ น้อย ๆ More English.");
@@ -84,7 +84,7 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
}
- private static AnalysisService createAnalysisService() throws IOException {
+ private static TestAnalysis createTestAnalysis() throws IOException {
InputStream keywords = IcuTokenizerFactoryTests.class.getResourceAsStream("KeywordTokenizer.rbbi");
InputStream latin = IcuTokenizerFactoryTests.class.getResourceAsStream("Latin-dont-break-on-hyphens.rbbi");
@@ -102,6 +102,6 @@ public class IcuTokenizerFactoryTests extends ESTestCase {
.build();
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build();
- return createAnalysisService(new Index("test", "_na_"), nodeSettings, settings, new AnalysisICUPlugin());
+ return createTestAnalysis(new Index("test", "_na_"), nodeSettings, settings, new AnalysisICUPlugin());
}
}
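
The AnalysisService-to-TestAnalysis rewrite in this test (and in the analysis tests that follow) is mechanical: the named lookup methods become plain map accesses on the holder's public fields. A condensed sketch of the new shape, assuming the ESTestCase scaffolding visible in these hunks:

public void testIcuLookups() throws IOException {
    TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisICUPlugin());
    // Each registry is now a map keyed by the configured component name.
    TokenizerFactory tokenizerFactory = analysis.tokenizer.get("icu_tokenizer");
    TokenFilterFactory filterFactory = analysis.tokenFilter.get("icu_normalizer");
    CharFilterFactory charFilterFactory = analysis.charFilter.get("icu_normalizer");
    // Analyzers are looked up via analysis.indexAnalyzers.get(name), as in the
    // Kuromoji and Stempel hunks below.
}
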
diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java
index 9255a250f1..86338e0670 100644
--- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java
+++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java
@@ -31,24 +31,24 @@ import static org.hamcrest.Matchers.instanceOf;
*/
public class SimpleIcuAnalysisTests extends ESTestCase {
public void testDefaultsIcuAnalysis() throws IOException {
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisICUPlugin());
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("icu_tokenizer");
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("icu_tokenizer");
assertThat(tokenizerFactory, instanceOf(IcuTokenizerFactory.class));
- TokenFilterFactory filterFactory = analysisService.tokenFilter("icu_normalizer");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("icu_normalizer");
assertThat(filterFactory, instanceOf(IcuNormalizerTokenFilterFactory.class));
- filterFactory = analysisService.tokenFilter("icu_folding");
+ filterFactory = analysis.tokenFilter.get("icu_folding");
assertThat(filterFactory, instanceOf(IcuFoldingTokenFilterFactory.class));
- filterFactory = analysisService.tokenFilter("icu_collation");
+ filterFactory = analysis.tokenFilter.get("icu_collation");
assertThat(filterFactory, instanceOf(IcuCollationTokenFilterFactory.class));
- filterFactory = analysisService.tokenFilter("icu_transform");
+ filterFactory = analysis.tokenFilter.get("icu_transform");
assertThat(filterFactory, instanceOf(IcuTransformTokenFilterFactory.class));
- CharFilterFactory charFilterFactory = analysisService.charFilter("icu_normalizer");
+ CharFilterFactory charFilterFactory = analysis.charFilter.get("icu_normalizer");
assertThat(charFilterFactory, instanceOf(IcuNormalizerCharFilterFactory.class));
}
}
diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java
index 62e9c9db14..8f9a38dc8f 100644
--- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java
+++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java
@@ -50,9 +50,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.language", "tr")
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I WİLL USE TURKİSH CASING", "ı will use turkish casıng");
}
@@ -66,9 +66,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.decomposition", "canonical")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng");
}
@@ -82,9 +82,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "secondary")
.put("index.analysis.filter.myCollator.decomposition", "no")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "TESTING", "testing");
}
@@ -99,9 +99,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.alternate", "shifted")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo-bar", "foo bar");
}
@@ -117,9 +117,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.alternate", "shifted")
.put("index.analysis.filter.myCollator.variableTop", " ")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo bar", "foobar");
// now assert that punctuation still matters: foo-bar < foo bar
assertCollation(filterFactory, "foo-bar", "foo bar", -1);
@@ -135,9 +135,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.numeric", "true")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "foobar-9", "foobar-10", -1);
}
@@ -152,9 +152,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.caseLevel", "true")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "résumé", "resume");
assertCollatesToSame(filterFactory, "Résumé", "Resume");
// now assert that case still matters: resume < Resume
@@ -172,9 +172,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.strength", "tertiary")
.put("index.analysis.filter.myCollator.caseFirst", "upper")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "Resume", "resume", -1);
}
@@ -200,9 +200,9 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
.put("index.analysis.filter.myCollator.rules", tailoredRules)
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "Töne", "Toene");
}
diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java
index b82accf0cf..32cbabfc9a 100644
--- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java
+++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java
@@ -37,8 +37,8 @@ public class SimpleIcuNormalizerCharFilterTests extends ESTestCase {
Settings settings = Settings.builder()
.put("index.analysis.char_filter.myNormalizerChar.type", "icu_normalizer")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- CharFilterFactory charFilterFactory = analysisService.charFilter("myNormalizerChar");
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ CharFilterFactory charFilterFactory = analysis.charFilter.get("myNormalizerChar");
String input = "ʰ㌰゙5℃№㈱㌘,バッファーの正規化のテスト.㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि";
Normalizer2 normalizer = Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE);
@@ -61,8 +61,8 @@ public class SimpleIcuNormalizerCharFilterTests extends ESTestCase {
.put("index.analysis.char_filter.myNormalizerChar.name", "nfkc")
.put("index.analysis.char_filter.myNormalizerChar.mode", "decompose")
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
- CharFilterFactory charFilterFactory = analysisService.charFilter("myNormalizerChar");
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+ CharFilterFactory charFilterFactory = analysis.charFilter.get("myNormalizerChar");
String input = "ʰ㌰゙5℃№㈱㌘,バッファーの正規化のテスト.㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि";
Normalizer2 normalizer = Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.DECOMPOSE);
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.1.0.jar.sha1
deleted file mode 100644
index bb0e327d27..0000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dc436a2a2324e95fb27678d85ca6fd5018a5cec6
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1
new file mode 100644
index 0000000000..749cb8ecde
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1
@@ -0,0 +1 @@
+17ee76df332c0342a172790472b777086487a299
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/JapaneseStopTokenFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/JapaneseStopTokenFilterFactory.java
index 7b760bc4f6..d10fe4089f 100644
--- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/JapaneseStopTokenFilterFactory.java
+++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/JapaneseStopTokenFilterFactory.java
@@ -20,10 +20,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.ja.JapaneseAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java
index 21d9b80405..0c6ab2d3ea 100644
--- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java
+++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java
@@ -19,10 +19,10 @@
package org.elasticsearch.index.analysis;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ja.JapaneseAnalyzer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
import org.apache.lucene.analysis.ja.dict.UserDictionary;
-import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
diff --git a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java
index 53196ac746..2da9416fbd 100644
--- a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java
+++ b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java
@@ -48,44 +48,45 @@ import static org.hamcrest.Matchers.notNullValue;
*/
public class KuromojiAnalysisTests extends ESTestCase {
public void testDefaultsKuromojiAnalysis() throws IOException {
- AnalysisService analysisService = createAnalysisService();
+ TestAnalysis analysis = createTestAnalysis();
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_tokenizer");
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_tokenizer");
assertThat(tokenizerFactory, instanceOf(KuromojiTokenizerFactory.class));
- TokenFilterFactory filterFactory = analysisService.tokenFilter("kuromoji_part_of_speech");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("kuromoji_part_of_speech");
assertThat(filterFactory, instanceOf(KuromojiPartOfSpeechFilterFactory.class));
- filterFactory = analysisService.tokenFilter("kuromoji_readingform");
+ filterFactory = analysis.tokenFilter.get("kuromoji_readingform");
assertThat(filterFactory, instanceOf(KuromojiReadingFormFilterFactory.class));
- filterFactory = analysisService.tokenFilter("kuromoji_baseform");
+ filterFactory = analysis.tokenFilter.get("kuromoji_baseform");
assertThat(filterFactory, instanceOf(KuromojiBaseFormFilterFactory.class));
- filterFactory = analysisService.tokenFilter("kuromoji_stemmer");
+ filterFactory = analysis.tokenFilter.get("kuromoji_stemmer");
assertThat(filterFactory, instanceOf(KuromojiKatakanaStemmerFactory.class));
- filterFactory = analysisService.tokenFilter("ja_stop");
+ filterFactory = analysis.tokenFilter.get("ja_stop");
assertThat(filterFactory, instanceOf(JapaneseStopTokenFilterFactory.class));
- filterFactory = analysisService.tokenFilter("kuromoji_number");
+ filterFactory = analysis.tokenFilter.get("kuromoji_number");
assertThat(filterFactory, instanceOf(KuromojiNumberFilterFactory.class));
- NamedAnalyzer analyzer = analysisService.analyzer("kuromoji");
+ IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
+ NamedAnalyzer analyzer = indexAnalyzers.get("kuromoji");
assertThat(analyzer.analyzer(), instanceOf(JapaneseAnalyzer.class));
- analyzer = analysisService.analyzer("my_analyzer");
+ analyzer = indexAnalyzers.get("my_analyzer");
assertThat(analyzer.analyzer(), instanceOf(CustomAnalyzer.class));
assertThat(analyzer.analyzer().tokenStream(null, new StringReader("")), instanceOf(JapaneseTokenizer.class));
- CharFilterFactory charFilterFactory = analysisService.charFilter("kuromoji_iteration_mark");
+ CharFilterFactory charFilterFactory = analysis.charFilter.get("kuromoji_iteration_mark");
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
}
public void testBaseFormFilterFactory() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_pos");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_pos");
assertThat(tokenFilter, instanceOf(KuromojiPartOfSpeechFilterFactory.class));
String source = "私は制限スピードを超える。";
String[] expected = new String[]{"私", "は", "制限", "スピード", "を"};
@@ -95,8 +96,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testReadingFormFilterFactory() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_rf");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_rf");
assertThat(tokenFilter, instanceOf(KuromojiReadingFormFilterFactory.class));
String source = "今夜はロバート先生と話した";
String[] expected_tokens_romaji = new String[]{"kon'ya", "ha", "robato", "sensei", "to", "hanashi", "ta"};
@@ -109,14 +110,14 @@ public class KuromojiAnalysisTests extends ESTestCase {
tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH);
tokenizer.setReader(new StringReader(source));
String[] expected_tokens_katakana = new String[]{"コンヤ", "ハ", "ロバート", "センセイ", "ト", "ハナシ", "タ"};
- tokenFilter = analysisService.tokenFilter("kuromoji_readingform");
+ tokenFilter = analysis.tokenFilter.get("kuromoji_readingform");
assertThat(tokenFilter, instanceOf(KuromojiReadingFormFilterFactory.class));
assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens_katakana);
}
public void testKatakanaStemFilter() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_stemmer");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_stemmer");
assertThat(tokenFilter, instanceOf(KuromojiKatakanaStemmerFactory.class));
String source = "明後日パーティーに行く予定がある。図書館で資料をコピーしました。";
@@ -128,7 +129,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
String[] expected_tokens_katakana = new String[]{"明後日", "パーティ", "に", "行く", "予定", "が", "ある", "図書館", "で", "資料", "を", "コピー", "し", "まし", "た"};
assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens_katakana);
- tokenFilter = analysisService.tokenFilter("kuromoji_ks");
+ tokenFilter = analysis.tokenFilter.get("kuromoji_ks");
assertThat(tokenFilter, instanceOf(KuromojiKatakanaStemmerFactory.class));
tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH);
tokenizer.setReader(new StringReader(source));
@@ -140,9 +141,9 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testIterationMarkCharFilter() throws IOException {
- AnalysisService analysisService = createAnalysisService();
+ TestAnalysis analysis = createTestAnalysis();
// test only kanji
- CharFilterFactory charFilterFactory = analysisService.charFilter("kuromoji_im_only_kanji");
+ CharFilterFactory charFilterFactory = analysis.charFilter.get("kuromoji_im_only_kanji");
assertNotNull(charFilterFactory);
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
@@ -153,7 +154,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
// test only kana
- charFilterFactory = analysisService.charFilter("kuromoji_im_only_kana");
+ charFilterFactory = analysis.charFilter.get("kuromoji_im_only_kana");
assertNotNull(charFilterFactory);
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
@@ -163,7 +164,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
// test default
- charFilterFactory = analysisService.charFilter("kuromoji_im_default");
+ charFilterFactory = analysis.charFilter.get("kuromoji_im_default");
assertNotNull(charFilterFactory);
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class));
@@ -173,8 +174,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testJapaneseStopFilterFactory() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("ja_stop");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("ja_stop");
assertThat(tokenFilter, instanceOf(JapaneseStopTokenFilterFactory.class));
String source = "私は制限スピードを超える。";
String[] expected = new String[]{"私", "制限", "超える"};
@@ -183,7 +184,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
assertSimpleTSOutput(tokenFilter.create(tokenizer), expected);
}
- private static AnalysisService createAnalysisService() throws IOException {
+ private static TestAnalysis createTestAnalysis() throws IOException {
InputStream empty_dict = KuromojiAnalysisTests.class.getResourceAsStream("empty_user_dict.txt");
InputStream dict = KuromojiAnalysisTests.class.getResourceAsStream("user_dict.txt");
Path home = createTempDir();
@@ -198,7 +199,7 @@ public class KuromojiAnalysisTests extends ESTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build();
- return createAnalysisService(new Index("test", "_na_"), nodeSettings, settings, new AnalysisKuromojiPlugin());
+ return createTestAnalysis(new Index("test", "_na_"), nodeSettings, settings, new AnalysisKuromojiPlugin());
}
public static void assertSimpleTSOutput(TokenStream stream,
@@ -230,8 +231,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testKuromojiUserDict() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_user_dict");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_user_dict");
String source = "私は制限スピードを超える。";
String[] expected = new String[]{"私", "は", "制限スピード", "を", "超える"};
@@ -242,14 +243,14 @@ public class KuromojiAnalysisTests extends ESTestCase {
// fix #59
public void testKuromojiEmptyUserDict() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_empty_user_dict");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_empty_user_dict");
assertThat(tokenizerFactory, instanceOf(KuromojiTokenizerFactory.class));
}
public void testNbestCost() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_nbest_cost");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_nbest_cost");
String source = "鳩山積み";
String[] expected = new String[] {"鳩", "鳩山", "山積み", "積み"};
@@ -259,8 +260,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testNbestExample() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_nbest_examples");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_nbest_examples");
String source = "鳩山積み";
String[] expected = new String[] {"鳩", "鳩山", "山積み", "積み"};
@@ -270,8 +271,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testNbestBothOptions() throws IOException {
- AnalysisService analysisService = createAnalysisService();
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("kuromoji_nbest_both");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("kuromoji_nbest_both");
String source = "鳩山積み";
String[] expected = new String[] {"鳩", "鳩山", "山積み", "積み"};
@@ -282,8 +283,8 @@ public class KuromojiAnalysisTests extends ESTestCase {
}
public void testNumberFilterFactory() throws Exception {
- AnalysisService analysisService = createAnalysisService();
- TokenFilterFactory tokenFilter = analysisService.tokenFilter("kuromoji_number");
+ TestAnalysis analysis = createTestAnalysis();
+ TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_number");
assertThat(tokenFilter, instanceOf(KuromojiNumberFilterFactory.class));
String source = "本日十万二千五百円のワインを買った";
String[] expected = new String[]{"本日", "102500", "円", "の", "ワイン", "を", "買っ", "た"};
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.1.0.jar.sha1
deleted file mode 100644
index 94556c2405..0000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1debc0cb187cde2bb2bcb3fc8a468f820d25b440
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1
new file mode 100644
index 0000000000..359173e008
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1
@@ -0,0 +1 @@
+8d2a6b8679563d9f044eb1cee580282b20d8e149
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/Nysiis.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/Nysiis.java
index 894d5d8bd3..62e2174af8 100644
--- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/Nysiis.java
+++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/Nysiis.java
@@ -263,7 +263,7 @@ public class Nysiis implements StringEncoder {
str = PAT_DT_ETC.matcher(str).replaceFirst("D");
// First character of key = first character of name.
- StringBuffer key = new StringBuffer(str.length());
+ StringBuilder key = new StringBuilder(str.length());
key.append(str.charAt(0));
// Transcode remaining characters, incrementing by one character each time
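
The StringBuffer-to-StringBuilder swap above is safe because the two classes share the same append API; StringBuilder simply drops the per-call synchronization that a single-threaded encoder like this never needs. A two-line illustration (values are made up):

StringBuilder key = new StringBuilder(9);   // same capacity constructor as StringBuffer
key.append("macintosh".charAt(0));          // same append(...) calls, no lock per call
String encoded = key.toString();
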
diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java
index 3dcfadce78..b0c23e29ab 100644
--- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java
+++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java
@@ -39,8 +39,8 @@ public class SimplePhoneticAnalysisTests extends ESTestCase {
Settings settings = Settings.builder().loadFromStream(yaml, getClass().getResourceAsStream(yaml))
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisPhoneticPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("phonetic");
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisPhoneticPlugin());
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("phonetic");
MatcherAssert.assertThat(filterFactory, instanceOf(PhoneticTokenFilterFactory.class));
}
}
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.1.0.jar.sha1
deleted file mode 100644
index 5abfc22ee1..0000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8e898fbd5da085f7b041feb3537a34137d2db560
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1
new file mode 100644
index 0000000000..66e339bfa2
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1
@@ -0,0 +1 @@
+ba3fd99d1cf47d31b82817accdb199fc7a8d838d
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java b/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java
index 08aebdee2b..e2d6f6db51 100644
--- a/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java
+++ b/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java
@@ -31,9 +31,9 @@ import static org.hamcrest.Matchers.instanceOf;
public class SimpleSmartChineseAnalysisTests extends ESTestCase {
public void testDefaultsIcuAnalysis() throws IOException {
- final AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY,
+ final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY,
new AnalysisSmartChinesePlugin());
- TokenizerFactory tokenizerFactory = analysisService.tokenizer("smartcn_tokenizer");
+ TokenizerFactory tokenizerFactory = analysis.tokenizer.get("smartcn_tokenizer");
MatcherAssert.assertThat(tokenizerFactory, instanceOf(SmartChineseTokenizerTokenizerFactory.class));
}
}
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.1.0.jar.sha1
deleted file mode 100644
index ac449309ee..0000000000
--- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-51a52b0cd4ba5e686201917e65393feb56afd3a7
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1
new file mode 100644
index 0000000000..5cfb071f3a
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1
@@ -0,0 +1 @@
+09d2a759a765f73e2e7becbc560411469c464cfa
\ No newline at end of file
diff --git a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java
index 4f7ee642eb..d0b81f01d0 100644
--- a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java
+++ b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java
@@ -36,12 +36,12 @@ import static org.hamcrest.Matchers.instanceOf;
*/
public class PolishAnalysisTests extends ESTestCase {
public void testDefaultsPolishAnalysis() throws IOException {
- final AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY,
+ final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY,
new AnalysisStempelPlugin());
- TokenFilterFactory tokenizerFactory = analysisService.tokenFilter("polish_stem");
+ TokenFilterFactory tokenizerFactory = analysis.tokenFilter.get("polish_stem");
MatcherAssert.assertThat(tokenizerFactory, instanceOf(PolishStemTokenFilterFactory.class));
- Analyzer analyzer = analysisService.analyzer("polish").analyzer();
+ Analyzer analyzer = analysis.indexAnalyzers.get("polish").analyzer();
MatcherAssert.assertThat(analyzer, instanceOf(PolishAnalyzer.class));
}
}
diff --git a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java
index 3fc12ccdfe..26f02c9df4 100644
--- a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java
+++ b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java
@@ -49,9 +49,9 @@ public class SimplePolishTokenFilterTests extends ESTestCase {
Settings settings = Settings.builder()
.put("index.analysis.filter.myStemmer.type", "polish_stem")
.build();
- AnalysisService analysisService = createAnalysisService(index, settings, new AnalysisStempelPlugin());
+ TestAnalysis analysis = createTestAnalysis(index, settings, new AnalysisStempelPlugin());
- TokenFilterFactory filterFactory = analysisService.tokenFilter("myStemmer");
+ TokenFilterFactory filterFactory = analysis.tokenFilter.get("myStemmer");
Tokenizer tokenizer = new KeywordTokenizer();
tokenizer.setReader(new StringReader(source));
@@ -65,9 +65,9 @@ public class SimplePolishTokenFilterTests extends ESTestCase {
}
private void testAnalyzer(String source, String... expected_terms) throws IOException {
- AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY, new AnalysisStempelPlugin());
+ TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisStempelPlugin());
- Analyzer analyzer = analysisService.analyzer("polish").analyzer();
+ Analyzer analyzer = analysis.indexAnalyzers.get("polish").analyzer();
TokenStream ts = analyzer.tokenStream("test", source);
diff --git a/plugins/build.gradle b/plugins/build.gradle
index e49b08c601..8866dfc8ee 100644
--- a/plugins/build.gradle
+++ b/plugins/build.gradle
@@ -22,7 +22,6 @@ configure(subprojects.findAll { it.parent.path == project.path }) {
group = 'org.elasticsearch.plugin'
apply plugin: 'elasticsearch.esplugin'
- apply plugin: 'com.bmuschko.nexus'
esplugin {
// for local ES plugins, the name of the plugin is the same as the directory
diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java
index da684fd824..7ee62dd877 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java
@@ -19,13 +19,13 @@
package org.elasticsearch.cloud.azure.classic;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cloud.azure.classic.management.AzureComputeService;
import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -43,7 +43,7 @@ import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin;
* @see AzureComputeServiceImpl
*/
public class AzureDiscoveryModule extends AbstractModule {
- protected final ESLogger logger;
+ protected final Logger logger;
private Settings settings;
// pkg private so it is settable by tests
@@ -69,7 +69,7 @@ public class AzureDiscoveryModule extends AbstractModule {
* Check if discovery is meant to start
* @return true if we can start discovery features
*/
- public static boolean isDiscoveryReady(Settings settings, ESLogger logger) {
+ public static boolean isDiscoveryReady(Settings settings, Logger logger) {
// User set discovery.type: azure
if (!AzureDiscoveryPlugin.AZURE.equalsIgnoreCase(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) {
logger.trace("discovery.type not set to {}", AzureDiscoveryPlugin.AZURE);
diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
index 4c0ac17331..db5c1cc5c4 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
@@ -19,11 +19,11 @@
package org.elasticsearch.plugin.discovery.azure.classic;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.cloud.azure.classic.AzureDiscoveryModule;
import org.elasticsearch.cloud.azure.classic.management.AzureComputeService;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -41,7 +41,7 @@ public class AzureDiscoveryPlugin extends Plugin {
public static final String AZURE = "azure";
private final Settings settings;
- protected final ESLogger logger = Loggers.getLogger(AzureDiscoveryPlugin.class);
+ protected final Logger logger = Loggers.getLogger(AzureDiscoveryPlugin.class);
public AzureDiscoveryPlugin(Settings settings) {
this.settings = settings;
diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle
index 506215708e..9ad2f8d02e 100644
--- a/plugins/discovery-ec2/build.gradle
+++ b/plugins/discovery-ec2/build.gradle
@@ -42,6 +42,12 @@ dependencyLicenses {
mapping from: /jackson-.*/, to: 'jackson'
}
+bundlePlugin {
+ from('config/discovery-ec2') {
+ into 'config'
+ }
+}
+
test {
// this is needed for insecure plugins, remove if possible!
systemProperty 'tests.artifact', project.name
diff --git a/plugins/discovery-ec2/config/discovery-ec2/log4j2.properties b/plugins/discovery-ec2/config/discovery-ec2/log4j2.properties
new file mode 100644
index 0000000000..aa52f0232e
--- /dev/null
+++ b/plugins/discovery-ec2/config/discovery-ec2/log4j2.properties
@@ -0,0 +1,8 @@
+logger.com_amazonaws.name = com.amazonaws
+logger.com_amazonaws.level = warn
+
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error
+
+logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
+logger.com_amazonaws_metrics_AwsSdkMetrics.level = error
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java
index e35b082899..1a4bf278f3 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java
@@ -30,14 +30,12 @@ import com.amazonaws.internal.StaticCredentialsProvider;
import com.amazonaws.retry.RetryPolicy;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.cloud.aws.network.Ec2NameResolver;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import java.util.Random;
@@ -71,7 +69,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements Aws
return this.client;
}
- protected static AWSCredentialsProvider buildCredentials(ESLogger logger, Settings settings) {
+ protected static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings) {
AWSCredentialsProvider credentials;
String key = CLOUD_EC2.KEY_SETTING.get(settings);
@@ -87,7 +85,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements Aws
return credentials;
}
- protected static ClientConfiguration buildConfiguration(ESLogger logger, Settings settings) {
+ protected static ClientConfiguration buildConfiguration(Logger logger, Settings settings) {
ClientConfiguration clientConfiguration = new ClientConfiguration();
// the response metadata cache is only there for diagnostics purposes,
// but can force objects from every response to the old generation.
@@ -135,7 +133,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements Aws
return clientConfiguration;
}
- protected static String findEndpoint(ESLogger logger, Settings settings) {
+ protected static String findEndpoint(Logger logger, Settings settings) {
String endpoint = null;
if (CLOUD_EC2.ENDPOINT_SETTING.exists(settings)) {
endpoint = CLOUD_EC2.ENDPOINT_SETTING.get(settings);
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java
index a76a2b04a9..11732725e9 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java
@@ -21,12 +21,12 @@ package org.elasticsearch.cloud.aws;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.SignerFactory;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
public class AwsSigner {
- private static final ESLogger logger = Loggers.getLogger(AwsSigner.class);
+ private static final Logger logger = Loggers.getLogger(AwsSigner.class);
private AwsSigner() {
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java
index 1f3043fe6d..6d4fcdc4c8 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java
@@ -27,6 +27,8 @@ import com.amazonaws.services.ec2.model.Filter;
import com.amazonaws.services.ec2.model.GroupIdentifier;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.Reservation;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.cloud.aws.AwsEc2Service;
import org.elasticsearch.cloud.aws.AwsEc2Service.DISCOVERY_EC2;
@@ -175,7 +177,10 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni
emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()));
}
} catch (Exception e) {
- logger.warn("failed ot add {}, address {}", e, instance.getInstanceId(), address);
+ final String finalAddress = address;
+ logger.warn(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("failed to add {}, address {}", instance.getInstanceId(), finalAddress), e);
}
} else {
logger.trace("not adding {}, address is null, host_type {}", instance.getInstanceId(), hostType);
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java
index 346372f554..7f8e983e52 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java
@@ -19,21 +19,7 @@
package org.elasticsearch.plugin.discovery.ec2;
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.UncheckedIOException;
-import java.net.URL;
-import java.net.URLConnection;
-import java.nio.charset.StandardCharsets;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.cloud.aws.AwsEc2Service;
import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl;
@@ -41,9 +27,7 @@ import org.elasticsearch.cloud.aws.Ec2Module;
import org.elasticsearch.cloud.aws.network.Ec2NameResolver;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -54,12 +38,27 @@ import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.UncheckedIOException;
+import java.net.URL;
+import java.net.URLConnection;
+import java.nio.charset.StandardCharsets;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
/**
*
*/
public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin {
- private static ESLogger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class);
+ private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class);
public static final String EC2 = "ec2";
diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java
index 49f4f88541..050a25bb18 100644
--- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java
+++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java
@@ -373,7 +373,7 @@ import com.amazonaws.services.ec2.model.TerminateInstancesResult;
import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesRequest;
import com.amazonaws.services.ec2.model.UnmonitorInstancesRequest;
import com.amazonaws.services.ec2.model.UnmonitorInstancesResult;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.util.ArrayList;
@@ -386,7 +386,7 @@ import java.util.regex.Pattern;
public class AmazonEC2Mock implements AmazonEC2 {
- private static final ESLogger logger = ESLoggerFactory.getLogger(AmazonEC2Mock.class.getName());
+ private static final Logger logger = ESLoggerFactory.getLogger(AmazonEC2Mock.class.getName());
public static final String PREFIX_PRIVATE_IP = "10.0.0.";
public static final String PREFIX_PUBLIC_IP = "8.8.8.";
diff --git a/plugins/discovery-file/build.gradle b/plugins/discovery-file/build.gradle
new file mode 100644
index 0000000000..3b78f06505
--- /dev/null
+++ b/plugins/discovery-file/build.gradle
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.test.ClusterConfiguration
+import org.elasticsearch.gradle.test.ClusterFormationTasks
+import org.elasticsearch.gradle.test.NodeInfo
+
+esplugin {
+ description 'Discovery file plugin enables unicast discovery from hosts stored in a file.'
+ classname 'org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin'
+}
+
+bundlePlugin {
+ from('config/discovery-file') {
+ into 'config'
+ }
+}
+
+task setupSeedNodeAndUnicastHostsFile(type: DefaultTask) {
+ mustRunAfter(precommit)
+}
+// setup the initial cluster with one node that will serve as the seed node
+// for unicast discovery
+ClusterConfiguration config = new ClusterConfiguration(project)
+config.clusterName = 'discovery-file-test-cluster'
+List<NodeInfo> nodes = ClusterFormationTasks.setup(project, setupSeedNodeAndUnicastHostsFile, config)
+File srcUnicastHostsFile = file('build/cluster/unicast_hosts.txt')
+
+// write the unicast_hosts.txt file to a temporary location to be used by the second cluster
+setupSeedNodeAndUnicastHostsFile.doLast {
+ // write the unicast_hosts.txt file to a temp file in the build directory
+ srcUnicastHostsFile.setText(nodes.get(0).transportUri(), 'UTF-8')
+}
+
+// second cluster, which will connect to the first via the unicast_hosts.txt file
+integTest {
+ dependsOn(setupSeedNodeAndUnicastHostsFile)
+ cluster {
+ clusterName = 'discovery-file-test-cluster'
+ extraConfigFile 'discovery-file/unicast_hosts.txt', srcUnicastHostsFile
+ }
+ finalizedBy ':plugins:discovery-file:setupSeedNodeAndUnicastHostsFile#stop'
+}
diff --git a/plugins/discovery-file/config/discovery-file/unicast_hosts.txt b/plugins/discovery-file/config/discovery-file/unicast_hosts.txt
new file mode 100644
index 0000000000..5e265e0f29
--- /dev/null
+++ b/plugins/discovery-file/config/discovery-file/unicast_hosts.txt
@@ -0,0 +1,20 @@
+# The unicast_hosts.txt file contains the list of unicast hosts to connect to
+# for pinging during the discovery process, when using the file-based discovery
+# mechanism. This file should contain one entry per line, where an entry is a
+# host/port combination. The host and port should be separated by a `:`. If
+# the port is left off, a default port of 9300 is assumed. For example, if the
+# cluster has three nodes that participate in the discovery process:
+# (1) 66.77.88.99 running on port 9300 (2) 66.77.88.100 running on port 9305
+# and (3) 66.77.88.99 running on port 10005, then this file should contain the
+# following text:
+#
+#10.10.10.5
+#10.10.10.6:9305
+#10.10.10.5:10005
+#
+# For IPv6 addresses, make sure to put a bracket around the host part of the address,
+# for example: [2001:cdba:0000:0000:0000:0000:3257:9652]:9301 (where 9301 is the port).
+#
+# NOTE: all lines starting with a `#` are comments, and comments must exist
+# on lines of their own (i.e. comments cannot begin in the middle of a line)
+#
\ No newline at end of file
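
The format documented above is simple enough to sketch independently of the plugin (which delegates actual parsing to UnicastZenPing.resolveDiscoveryNodes): whole-line # comments are skipped, a trailing :port overrides the default of 9300, and IPv6 hosts keep their brackets. A hypothetical, self-contained parser for illustration:

    import java.util.Arrays;
    import java.util.List;

    public class UnicastHostsFormatSketch {
        public static void main(String[] args) {
            // Example entries in the documented unicast_hosts.txt format.
            List<String> lines = Arrays.asList(
                    "# seed nodes",
                    "10.10.10.5",
                    "10.10.10.6:9305",
                    "[2001:cdba:0000:0000:0000:0000:3257:9652]:9301");
            for (String line : lines) {
                if (line.isEmpty() || line.startsWith("#")) {
                    continue; // comments must occupy lines of their own
                }
                String host = line;
                int port = 9300; // default when the port is left off
                int bracket = line.lastIndexOf(']');
                int colon = line.lastIndexOf(':');
                if (colon > bracket) { // a colon after any ']' separates the port
                    host = line.substring(0, colon);
                    port = Integer.parseInt(line.substring(colon + 1));
                }
                System.out.println(host + " -> port " + port);
            }
        }
    }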
diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java
new file mode 100644
index 0000000000..f781a3b7fe
--- /dev/null
+++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.file;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.plugins.DiscoveryPlugin;
+import org.elasticsearch.plugins.Plugin;
+
+/**
+ * Plugin for providing file-based unicast hosts discovery. The list of unicast hosts
+ * is obtained by reading the {@link FileBasedUnicastHostsProvider#UNICAST_HOSTS_FILE} in
+ * the {@link Environment#configFile()}/discovery-file directory.
+ */
+public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
+
+ private static final Logger logger = Loggers.getLogger(FileBasedDiscoveryPlugin.class);
+
+ private final Settings settings;
+
+ public FileBasedDiscoveryPlugin(Settings settings) {
+ this.settings = settings;
+ logger.trace("starting file-based discovery plugin...");
+ }
+
+ public void onModule(DiscoveryModule discoveryModule) {
+ logger.trace("registering file-based unicast hosts provider");
+ // using zen discovery for the discovery type and we're just adding a unicast host provider for it
+ discoveryModule.addUnicastHostProvider("zen", FileBasedUnicastHostsProvider.class);
+ }
+}
diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java
new file mode 100644
index 0000000000..78393d3400
--- /dev/null
+++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.file;
+
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing.resolveDiscoveryNodes;
+
+/**
+ * An implementation of {@link UnicastHostsProvider} that reads hosts/ports
+ * from {@link #UNICAST_HOSTS_FILE}.
+ *
+ * Each unicast host/port that is part of the discovery process must be listed on
+ * a separate line. If the port is left off an entry, a default port of 9300 is
+ * assumed. An example unicast hosts file could read:
+ *
+ * 67.81.244.10
+ * 67.81.244.11:9305
+ * 67.81.244.15:9400
+ */
+public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
+
+ static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt";
+ static final String UNICAST_HOST_PREFIX = "#zen_file_unicast_host_";
+
+ private final TransportService transportService;
+
+ private final Path unicastHostsFilePath;
+
+ private final AtomicLong nodeIdGenerator = new AtomicLong(); // generates unique ids for the node
+
+ @Inject
+ public FileBasedUnicastHostsProvider(Settings settings, TransportService transportService) {
+ super(settings);
+ this.transportService = transportService;
+ this.unicastHostsFilePath = new Environment(settings).configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE);
+ }
+
+ @Override
+ public List<DiscoveryNode> buildDynamicNodes() {
+ List<String> hostsList;
+ try (Stream<String> lines = Files.lines(unicastHostsFilePath)) {
+ hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments
+ .collect(Collectors.toList());
+ } catch (FileNotFoundException | NoSuchFileException e) {
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Failed to find unicast hosts file [{}]",
+ unicastHostsFilePath), e);
+ hostsList = Collections.emptyList();
+ } catch (IOException e) {
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Error reading unicast hosts file [{}]",
+ unicastHostsFilePath), e);
+ hostsList = Collections.emptyList();
+ }
+
+ final List<DiscoveryNode> discoNodes = new ArrayList<>();
+ for (final String host : hostsList) {
+ try {
+ discoNodes.addAll(resolveDiscoveryNodes(host, 1, transportService,
+ () -> UNICAST_HOST_PREFIX + nodeIdGenerator.incrementAndGet() + "#"));
+ } catch (IllegalArgumentException e) {
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Failed to parse transport address from [{}]",
+ host), e);
+ continue;
+ }
+ }
+
+ logger.debug("[discovery-file] Using dynamic discovery nodes {}", discoNodes);
+
+ return discoNodes;
+ }
+
+}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MapperAttachmentsClientYamlTestSuiteIT.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java
index 53a5c5c281..45905a152c 100644
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MapperAttachmentsClientYamlTestSuiteIT.java
+++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java
@@ -17,26 +17,27 @@
* under the License.
*/
-package org.elasticsearch.mapper.attachments;
+package org.elasticsearch.discovery.file;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;
import java.io.IOException;
-public class MapperAttachmentsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+/**
+ * Integration tests to make sure the file-based discovery plugin works in a cluster.
+ */
+public class FileBasedDiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
- public MapperAttachmentsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+ public FileBasedDiscoveryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
- return createParameters(0, 1);
+ return ESClientYamlSuiteTestCase.createParameters(0, 1);
}
}
-
diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java
new file mode 100644
index 0000000000..f38ae218ec
--- /dev/null
+++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.file;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.MockTcpTransport;
+import org.elasticsearch.transport.TransportService;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE;
+import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOST_PREFIX;
+
+/**
+ * Tests for {@link FileBasedUnicastHostsProvider}.
+ */
+public class FileBasedUnicastHostsProviderTests extends ESTestCase {
+
+ private static ThreadPool threadPool;
+ private MockTransportService transportService;
+
+ @BeforeClass
+ public static void createThreadPool() {
+ threadPool = new TestThreadPool(FileBasedUnicastHostsProviderTests.class.getName());
+ }
+
+ @AfterClass
+ public static void stopThreadPool() throws InterruptedException {
+ terminate(threadPool);
+ }
+
+ @Before
+ public void createTransportSvc() {
+ MockTcpTransport transport =
+ new MockTcpTransport(Settings.EMPTY,
+ threadPool,
+ BigArrays.NON_RECYCLING_INSTANCE,
+ new NoneCircuitBreakerService(),
+ new NamedWriteableRegistry(Collections.emptyList()),
+ new NetworkService(Settings.EMPTY, Collections.emptyList()));
+ transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
+ }
+
+ public void testBuildDynamicNodes() throws Exception {
+ final List<String> hostEntries = Arrays.asList("#comment, should be ignored", "192.168.0.1", "192.168.0.2:9305", "255.255.23.15");
+ final List<DiscoveryNode> nodes = setupAndRunHostProvider(hostEntries);
+ assertEquals(hostEntries.size() - 1, nodes.size()); // minus 1 because we are ignoring the first line that's a comment
+ assertEquals("192.168.0.1", nodes.get(0).getAddress().getHost());
+ assertEquals(9300, nodes.get(0).getAddress().getPort());
+ assertEquals(UNICAST_HOST_PREFIX + "1#", nodes.get(0).getId());
+ assertEquals("192.168.0.2", nodes.get(1).getAddress().getHost());
+ assertEquals(9305, nodes.get(1).getAddress().getPort());
+ assertEquals(UNICAST_HOST_PREFIX + "2#", nodes.get(1).getId());
+ assertEquals("255.255.23.15", nodes.get(2).getAddress().getHost());
+ assertEquals(9300, nodes.get(2).getAddress().getPort());
+ assertEquals(UNICAST_HOST_PREFIX + "3#", nodes.get(2).getId());
+ }
+
+ public void testEmptyUnicastHostsFile() throws Exception {
+ final List<String> hostEntries = Collections.emptyList();
+ final List<DiscoveryNode> nodes = setupAndRunHostProvider(hostEntries);
+ assertEquals(0, nodes.size());
+ }
+
+ public void testUnicastHostsDoesNotExist() throws Exception {
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
+ .build();
+ final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, transportService);
+ final List<DiscoveryNode> nodes = provider.buildDynamicNodes();
+ assertEquals(0, nodes.size());
+ }
+
+ public void testInvalidHostEntries() throws Exception {
+ List<String> hostEntries = Arrays.asList("192.168.0.1:9300:9300");
+ List<DiscoveryNode> nodes = setupAndRunHostProvider(hostEntries);
+ assertEquals(0, nodes.size());
+ }
+
+ public void testSomeInvalidHostEntries() throws Exception {
+ List<String> hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301");
+ List<DiscoveryNode> nodes = setupAndRunHostProvider(hostEntries);
+ assertEquals(1, nodes.size()); // only one of the two is valid and will be used
+ assertEquals("192.168.0.1", nodes.get(0).getAddress().getHost());
+ assertEquals(9301, nodes.get(0).getAddress().getPort());
+ }
+
+ // sets up the config dir, writes to the unicast hosts file in the config dir,
+ // and then runs the file-based unicast host provider to get the list of discovery nodes
+ private List<DiscoveryNode> setupAndRunHostProvider(final List<String> hostEntries) throws IOException {
+ final Path homeDir = createTempDir();
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_HOME_SETTING.getKey(), homeDir)
+ .build();
+ final Path configDir = homeDir.resolve("config").resolve("discovery-file");
+ Files.createDirectories(configDir);
+ final Path unicastHostsPath = configDir.resolve(UNICAST_HOSTS_FILE);
+ try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) {
+ writer.write(String.join("\n", hostEntries));
+ }
+
+ return new FileBasedUnicastHostsProvider(settings, transportService).buildDynamicNodes();
+ }
+}
diff --git a/plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yaml b/plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yaml
new file mode 100644
index 0000000000..138115da11
--- /dev/null
+++ b/plugins/discovery-file/src/test/resources/rest-api-spec/test/discovery_file/10_basic.yaml
@@ -0,0 +1,13 @@
+# Integration tests for file-based discovery
+#
+"Ensure cluster formed successfully with discovery file":
+ # make sure both nodes joined the cluster
+ - do:
+ cluster.health:
+ wait_for_nodes: 2
+
+ # make sure the cluster was formed with the correct name
+ - do:
+ cluster.state: {}
+
+ - match: { cluster_name: 'discovery-file-test-cluster' } # correct cluster name, we formed the cluster we expected to
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java
index 5ec4b18e91..c6c7b9a0ae 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java
@@ -28,6 +28,8 @@ import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.services.compute.Compute;
import com.google.api.services.compute.model.Instance;
import com.google.api.services.compute.model.InstanceList;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -82,7 +84,7 @@ public class GceInstancesServiceImpl extends AbstractLifecycleComponent implemen
return instanceList.isEmpty() || instanceList.getItems() == null ?
Collections.<Instance>emptyList() : instanceList.getItems();
} catch (PrivilegedActionException e) {
- logger.warn("Problem fetching instance list for zone {}", e, zoneId);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Problem fetching instance list for zone {}", zoneId), e);
logger.debug("Full exception:", e);
// assist type inference
return Collections.<Instance>emptyList();
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java
index 81d10c756e..71e9fbc780 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java
@@ -19,8 +19,8 @@
package org.elasticsearch.cloud.gce;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@@ -29,7 +29,7 @@ public class GceModule extends AbstractModule {
static Class<? extends GceInstancesService> computeServiceImpl = GceInstancesServiceImpl.class;
protected final Settings settings;
- protected final ESLogger logger = Loggers.getLogger(GceModule.class);
+ protected final Logger logger = Loggers.getLogger(GceModule.class);
public GceModule(Settings settings) {
this.settings = settings;
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java
index c73df8f839..3426e74d4a 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java
@@ -22,6 +22,8 @@ package org.elasticsearch.discovery.gce;
import com.google.api.services.compute.model.AccessConfig;
import com.google.api.services.compute.model.Instance;
import com.google.api.services.compute.model.NetworkInterface;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.cloud.gce.GceInstancesService;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -245,7 +247,8 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas
}
}
} catch (Exception e) {
- logger.warn("failed to add {}, address {}", e, name, ip_private);
+ final String finalIpPrivate = ip_private;
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to add {}, address {}", name, finalIpPrivate), e);
}
}
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java
index b21d397d78..c005aa05a7 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java
@@ -29,8 +29,8 @@ import com.google.api.client.http.HttpResponse;
import com.google.api.client.http.HttpUnsuccessfulResponseHandler;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.client.util.Sleeper;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.SpecialPermission;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
@@ -43,8 +43,7 @@ public class RetryHttpInitializerWrapper implements HttpRequestInitializer {
private TimeValue maxWait;
- private static final ESLogger logger =
- ESLoggerFactory.getLogger(RetryHttpInitializerWrapper.class.getName());
+ private static final Logger logger = ESLoggerFactory.getLogger(RetryHttpInitializerWrapper.class.getName());
// Intercepts the request for filling in the "Authorization"
// header field, as well as recovering from certain unsuccessful
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
index 031f7eaf10..aeec991182 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
@@ -21,6 +21,7 @@ package org.elasticsearch.plugin.discovery.gce;
import com.google.api.client.http.HttpHeaders;
import com.google.api.client.util.ClassInfo;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.cloud.gce.GceInstancesService;
import org.elasticsearch.cloud.gce.GceMetadataService;
@@ -28,7 +29,6 @@ import org.elasticsearch.cloud.gce.GceModule;
import org.elasticsearch.cloud.gce.network.GceNameResolver;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
@@ -51,7 +51,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
public static final String GCE = "gce";
private final Settings settings;
- protected final ESLogger logger = Loggers.getLogger(GceDiscoveryPlugin.class);
+ protected final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class);
static {
/*
diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java
index adb06f2595..98f6fd0dc1 100644
--- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java
+++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java
@@ -23,11 +23,11 @@ import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpServer;
import com.sun.net.httpserver.HttpsConfigurator;
import com.sun.net.httpserver.HttpsServer;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.cloud.gce.GceInstancesServiceImpl;
import org.elasticsearch.cloud.gce.GceMetadataService;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -40,6 +40,7 @@ import org.junit.BeforeClass;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
+
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -125,7 +126,7 @@ public class GceDiscoverTests extends ESIntegTestCase {
httpsServer.createContext("/compute/v1/projects/testproject/zones/primaryzone/instances", (s) -> {
Headers headers = s.getResponseHeaders();
headers.add("Content-Type", "application/json; charset=UTF-8");
- ESLogger logger = Loggers.getLogger(GceDiscoverTests.class);
+ Logger logger = Loggers.getLogger(GceDiscoverTests.class);
try {
Path[] files = FileSystemUtils.files(logDir);
StringBuilder builder = new StringBuilder("{\"id\": \"dummy\",\"items\":[");
diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java
index 88a6fbd9e9..0100955453 100644
--- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java
+++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java
@@ -26,9 +26,9 @@ import com.google.api.client.json.Json;
import com.google.api.client.testing.http.MockHttpTransport;
import com.google.api.client.testing.http.MockLowLevelHttpRequest;
import com.google.api.client.testing.http.MockLowLevelHttpResponse;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.Callback;
@@ -37,7 +37,7 @@ import java.io.InputStream;
import java.net.URL;
public class GceMockUtils {
- protected static final ESLogger logger = Loggers.getLogger(GceMockUtils.class);
+ protected static final Logger logger = Loggers.getLogger(GceMockUtils.class);
public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/instance";
diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java
index 5923e3b690..95a0b85dba 100644
--- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java
+++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java
@@ -97,7 +97,9 @@ public final class GeoIpProcessor extends AbstractProcessor {
throw new ElasticsearchParseException("Unsupported database type [" + dbReader.getMetadata().getDatabaseType()
+ "]", new IllegalStateException());
}
- ingestDocument.setFieldValue(targetField, geoData);
+ if (geoData.isEmpty() == false) {
+ ingestDocument.setFieldValue(targetField, geoData);
+ }
}
@Override
@@ -149,28 +151,50 @@ public final class GeoIpProcessor extends AbstractProcessor {
geoData.put("ip", NetworkAddress.format(ipAddress));
break;
case COUNTRY_ISO_CODE:
- geoData.put("country_iso_code", country.getIsoCode());
+ String countryIsoCode = country.getIsoCode();
+ if (countryIsoCode != null) {
+ geoData.put("country_iso_code", countryIsoCode);
+ }
break;
case COUNTRY_NAME:
- geoData.put("country_name", country.getName());
+ String countryName = country.getName();
+ if (countryName != null) {
+ geoData.put("country_name", countryName);
+ }
break;
case CONTINENT_NAME:
- geoData.put("continent_name", continent.getName());
+ String continentName = continent.getName();
+ if (continentName != null) {
+ geoData.put("continent_name", continentName);
+ }
break;
case REGION_NAME:
- geoData.put("region_name", subdivision.getName());
+ String subdivisionName = subdivision.getName();
+ if (subdivisionName != null) {
+ geoData.put("region_name", subdivisionName);
+ }
break;
case CITY_NAME:
- geoData.put("city_name", city.getName());
+ String cityName = city.getName();
+ if (cityName != null) {
+ geoData.put("city_name", cityName);
+ }
break;
case TIMEZONE:
- geoData.put("timezone", location.getTimeZone());
+ String locationTimeZone = location.getTimeZone();
+ if (locationTimeZone != null) {
+ geoData.put("timezone", locationTimeZone);
+ }
break;
case LOCATION:
- Map<String, Object> locationObject = new HashMap<>();
- locationObject.put("lat", location.getLatitude());
- locationObject.put("lon", location.getLongitude());
- geoData.put("location", locationObject);
+ Double latitude = location.getLatitude();
+ Double longitude = location.getLongitude();
+ if (latitude != null && longitude != null) {
+ Map<String, Object> locationObject = new HashMap<>();
+ locationObject.put("lat", latitude);
+ locationObject.put("lon", longitude);
+ geoData.put("location", locationObject);
+ }
break;
}
}
@@ -202,13 +226,22 @@ public final class GeoIpProcessor extends AbstractProcessor {
geoData.put("ip", NetworkAddress.format(ipAddress));
break;
case COUNTRY_ISO_CODE:
- geoData.put("country_iso_code", country.getIsoCode());
+ String countryIsoCode = country.getIsoCode();
+ if (countryIsoCode != null) {
+ geoData.put("country_iso_code", countryIsoCode);
+ }
break;
case COUNTRY_NAME:
- geoData.put("country_name", country.getName());
+ String countryName = country.getName();
+ if (countryName != null) {
+ geoData.put("country_name", countryName);
+ }
break;
case CONTINENT_NAME:
- geoData.put("continent_name", continent.getName());
+ String continentName = continent.getName();
+ if (continentName != null) {
+ geoData.put("continent_name", continentName);
+ }
break;
}
}
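
The change above replaces unconditional puts with a guard on every nullable property, and only writes the target field when at least one property resolved. A minimal sketch of that pattern in isolation (the lookup methods are hypothetical stand-ins for the MaxMind database calls):

    import java.util.HashMap;
    import java.util.Map;

    public class GeoDataGuardSketch {
        public static void main(String[] args) {
            Map<String, Object> geoData = new HashMap<>();
            putIfNotNull(geoData, "country_iso_code", lookupCountryIsoCode());
            putIfNotNull(geoData, "city_name", lookupCityName());
            if (geoData.isEmpty() == false) { // same style as the patch
                System.out.println("setting target field: " + geoData);
            } else {
                System.out.println("no geo data resolved; target field left unset");
            }
        }

        static void putIfNotNull(Map<String, Object> map, String key, Object value) {
            if (value != null) {
                map.put(key, value);
            }
        }

        // hypothetical stand-ins for DatabaseReader responses that may be null
        static String lookupCountryIsoCode() { return "US"; }
        static String lookupCityName() { return null; }
    }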
diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java
index f3141c735d..3b2f65e281 100644
--- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java
+++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java
@@ -33,6 +33,7 @@ import java.util.zip.GZIPInputStream;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
public class GeoIpProcessorTests extends ESTestCase {
@@ -63,6 +64,51 @@ public class GeoIpProcessorTests extends ESTestCase {
assertThat(geoData.get("location"), equalTo(location));
}
+ public void testCity_withIpV6() throws Exception {
+ InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb.gz");
+ GeoIpProcessor processor = new GeoIpProcessor(randomAsciiOfLength(10), "source_field",
+ new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class));
+
+ String address = "2602:306:33d3:8000::3257:9652";
+ Map<String, Object> document = new HashMap<>();
+ document.put("source_field", address);
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+ processor.execute(ingestDocument);
+
+ assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(address));
+ @SuppressWarnings("unchecked")
+ Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
+ assertThat(geoData.size(), equalTo(8));
+ assertThat(geoData.get("ip"), equalTo(address));
+ assertThat(geoData.get("country_iso_code"), equalTo("US"));
+ assertThat(geoData.get("country_name"), equalTo("United States"));
+ assertThat(geoData.get("continent_name"), equalTo("North America"));
+ assertThat(geoData.get("region_name"), equalTo("Florida"));
+ assertThat(geoData.get("city_name"), equalTo("Hollywood"));
+ assertThat(geoData.get("timezone"), equalTo("America/New_York"));
+ Map<String, Object> location = new HashMap<>();
+ location.put("lat", 26.0252d);
+ location.put("lon", -80.296d);
+ assertThat(geoData.get("location"), equalTo(location));
+ }
+
+ public void testCityWithMissingLocation() throws Exception {
+ InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb.gz");
+ GeoIpProcessor processor = new GeoIpProcessor(randomAsciiOfLength(10), "source_field",
+ new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class));
+
+ Map<String, Object> document = new HashMap<>();
+ document.put("source_field", "93.114.45.13");
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+ processor.execute(ingestDocument);
+
+ assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo("93.114.45.13"));
+ @SuppressWarnings("unchecked")
+ Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
+ assertThat(geoData.size(), equalTo(1));
+ assertThat(geoData.get("ip"), equalTo("93.114.45.13"));
+ }
+
public void testCountry() throws Exception {
InputStream database = getDatabaseFileInputStream("/GeoLite2-Country.mmdb.gz");
GeoIpProcessor processor = new GeoIpProcessor(randomAsciiOfLength(10), "source_field",
@@ -83,6 +129,23 @@ public class GeoIpProcessorTests extends ESTestCase {
assertThat(geoData.get("continent_name"), equalTo("Europe"));
}
+ public void testCountryWithMissingLocation() throws Exception {
+ InputStream database = getDatabaseFileInputStream("/GeoLite2-Country.mmdb.gz");
+ GeoIpProcessor processor = new GeoIpProcessor(randomAsciiOfLength(10), "source_field",
+ new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class));
+
+ Map<String, Object> document = new HashMap<>();
+ document.put("source_field", "93.114.45.13");
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+ processor.execute(ingestDocument);
+
+ assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo("93.114.45.13"));
+ @SuppressWarnings("unchecked")
+ Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
+ assertThat(geoData.size(), equalTo(1));
+ assertThat(geoData.get("ip"), equalTo("93.114.45.13"));
+ }
+
public void testAddressIsNotInTheDatabase() throws Exception {
InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb.gz");
GeoIpProcessor processor = new GeoIpProcessor(randomAsciiOfLength(10), "source_field",
@@ -92,9 +155,7 @@ public class GeoIpProcessorTests extends ESTestCase {
document.put("source_field", "127.0.0.1");
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
- @SuppressWarnings("unchecked")
- Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
- assertThat(geoData.size(), equalTo(0));
+ assertThat(ingestDocument.getSourceAndMetadata().containsKey("target_field"), is(false));
}
/** Don't silently do DNS lookups or anything trappy on bogus data */
@@ -115,7 +176,7 @@ public class GeoIpProcessorTests extends ESTestCase {
}
}
- static InputStream getDatabaseFileInputStream(String path) throws IOException {
+ private static InputStream getDatabaseFileInputStream(String path) throws IOException {
return new GZIPInputStream(GeoIpProcessor.class.getResourceAsStream(path));
}
diff --git a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yaml b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yaml
index 33e9ec1ca4..f662f34ab5 100644
--- a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yaml
+++ b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yaml
@@ -122,3 +122,76 @@
- length: { _source.geoip: 2 }
- match: { _source.geoip.country_iso_code: "US" }
- match: { _source.geoip.continent_name: "North America" }
+
+---
+"Test geoip processor with geopoint mapping (both missing and including location)":
+ - do:
+ indices.create:
+ index: test
+ body: >
+ {
+ "mappings" : {
+ "test" : {
+ "properties" : {
+ "geoip.location" : {
+ "type": "geo_point"
+ }
+ }
+ }
+ }
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "description": "_description",
+ "processors": [
+ {
+ "geoip" : {
+ "field" : "field1"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ type: test
+ id: 1
+ pipeline: "my_pipeline"
+ body: { field1: "93.114.45.13" }
+
+ - do:
+ get:
+ index: test
+ type: test
+ id: 1
+ - match: { _source.field1: "93.114.45.13" }
+ - is_false: _source.geoip
+
+ - do:
+ index:
+ index: test
+ type: test
+ id: 2
+ pipeline: "my_pipeline"
+ body: { field1: "128.101.101.101" }
+
+ - do:
+ get:
+ index: test
+ type: test
+ id: 2
+ - match: { _source.field1: "128.101.101.101" }
+ - length: { _source.geoip: 5 }
+ - match: { _source.geoip.city_name: "Minneapolis" }
+ - match: { _source.geoip.country_iso_code: "US" }
+ - match: { _source.geoip.location.lon: -93.2166 }
+ - match: { _source.geoip.location.lat: 44.9759 }
+ - match: { _source.geoip.region_name: "Minnesota" }
+ - match: { _source.geoip.continent_name: "North America" }
diff --git a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExamplePluginConfiguration.java b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExamplePluginConfiguration.java
index 430d880766..21582ccde5 100644
--- a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExamplePluginConfiguration.java
+++ b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExamplePluginConfiguration.java
@@ -19,51 +19,38 @@
package org.elasticsearch.plugin.example;
-import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import java.io.IOException;
import java.nio.file.Path;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.nio.file.Files.newBufferedReader;
-import static org.elasticsearch.common.io.Streams.copyToString;
+import java.util.Locale;
/**
* Example configuration.
*/
public class ExamplePluginConfiguration {
- private String test = "not set in config";
+
+ private final Settings customSettings;
+
+ public static final Setting<String> TEST_SETTING =
+ new Setting<String>("test", "default_value",
+ (value) -> value, Setting.Property.Dynamic);
@Inject
public ExamplePluginConfiguration(Environment env) throws IOException {
// The directory part of the location matches the artifactId of this plugin
- Path configFile = env.configFile().resolve("jvm-example/example.yaml");
- String contents = copyToString(newBufferedReader(configFile, UTF_8));
- XContentParser parser = YamlXContent.yamlXContent.createParser(contents);
+ Path path = env.configFile().resolve("jvm-example/example.yaml");
+ customSettings = Settings.builder().loadFromPath(path).build();
- String currentFieldName = null;
- XContentParser.Token token = parser.nextToken();
- assert token == XContentParser.Token.START_OBJECT;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- currentFieldName = parser.currentName();
- } else if (token.isValue()) {
- if ("test".equals(currentFieldName)) {
- test = parser.text();
- } else {
- throw new ElasticsearchParseException("Unrecognized config key: {}", currentFieldName);
- }
- } else {
- throw new ElasticsearchParseException("Unrecognized config key: {}", currentFieldName);
- }
- }
+ // asserts for tests
+ assert customSettings != null;
+ assert TEST_SETTING.get(customSettings) != null;
}
public String getTestConfig() {
- return test;
+ return TEST_SETTING.get(customSettings);
}
}
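
The rewrite drops the hand-rolled YAML token loop in favor of the Settings infrastructure: loadFromPath reads the YAML file, and a typed Setting supplies the default and parsing. A short sketch of how such a Setting behaves, assuming the Elasticsearch core jar of this era on the classpath:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class SettingSketch {
        static final Setting<String> TEST_SETTING =
                new Setting<>("test", "default_value", (value) -> value, Setting.Property.Dynamic);

        public static void main(String[] args) {
            Settings settings = Settings.builder().put("test", "from-yaml").build();
            System.out.println(TEST_SETTING.get(settings));       // prints: from-yaml
            System.out.println(TEST_SETTING.get(Settings.EMPTY)); // prints: default_value
        }
    }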
diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java
index 3477f62a5b..da3d14cd02 100644
--- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java
+++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java
@@ -25,6 +25,7 @@ import org.elasticsearch.SpecialPermission;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ClassPermission;
import org.elasticsearch.script.CompiledScript;
@@ -138,6 +139,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements
public JavaScriptScriptEngineService(Settings settings) {
super(settings);
+ deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");
+
Context ctx = Context.enter();
try {
globalScope = ctx.initStandardObjects(null, true);
@@ -173,6 +176,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements
@Override
public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map<String, Object> vars) {
+ deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");
+
Context ctx = Context.enter();
try {
Scriptable scope = ctx.newObject(globalScope);
@@ -192,6 +197,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements
@Override
public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
+ deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");
+
Context ctx = Context.enter();
try {
final Scriptable scope = ctx.newObject(globalScope);
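
The deprecationLogger used above is inherited from AbstractComponent. A hedged sketch of the same warning outside a component, assuming the Elasticsearch core jar and that DeprecationLogger wraps a plain log4j2 Logger at this point in the migration:

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.DeprecationLogger;
    import org.elasticsearch.common.logging.Loggers;

    public class DeprecationSketch {
        private static final Logger logger = Loggers.getLogger(DeprecationSketch.class);
        private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);

        public static void main(String[] args) {
            // Routes the message to the dedicated deprecation log channel.
            deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");
        }
    }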
diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java
index 634a4ca6df..c3614952ec 100644
--- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java
+++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java
@@ -41,7 +41,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase {
final Object compiled = se.compile(null, "x + y", Collections.emptyMap());
final AtomicBoolean failed = new AtomicBoolean();
- Thread[] threads = new Thread[50];
+ Thread[] threads = new Thread[between(3, 12)];
final CountDownLatch latch = new CountDownLatch(threads.length);
final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1);
for (int i = 0; i < threads.length; i++) {
@@ -57,7 +57,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase {
vars.put("x", x);
vars.put("y", y);
ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars);
- for (int i = 0; i < 100000; i++) {
+ for (int i = 0; i < between(100, 1000); i++) {
long result = ((Number) script.run()).longValue();
assertThat(result, equalTo(addition));
}
@@ -83,7 +83,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase {
final Object compiled = se.compile(null, "x + y", Collections.emptyMap());
final AtomicBoolean failed = new AtomicBoolean();
- Thread[] threads = new Thread[50];
+ Thread[] threads = new Thread[between(3, 12)];
final CountDownLatch latch = new CountDownLatch(threads.length);
final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1);
for (int i = 0; i < threads.length; i++) {
@@ -96,7 +96,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase {
Map<String, Object> vars = new HashMap<String, Object>();
vars.put("x", x);
ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars);
- for (int i = 0; i < 100000; i++) {
+ for (int i = 0; i < between(100, 1000); i++) {
long y = Randomness.get().nextInt();
long addition = x + y;
script.setNextVar("y", y);
@@ -125,7 +125,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase {
final Object compiled = se.compile(null, "x + y", Collections.emptyMap());
final AtomicBoolean failed = new AtomicBoolean();
- Thread[] threads = new Thread[50];
+ Thread[] threads = new Thread[between(3, 12)];
final CountDownLatch latch = new CountDownLatch(threads.length);
final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1);
for (int i = 0; i < threads.length; i++) {
@@ -135,7 +135,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase {
try {
barrier.await();
Map<String, Object> runtimeVars = new HashMap<String, Object>();
- for (int i = 0; i < 100000; i++) {
+ for (int i = 0; i < between(100, 1000); i++) {
long x = Randomness.get().nextInt();
long y = Randomness.get().nextInt();
long addition = x + y;
diff --git a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java
index 5a16c06d4d..d31e691b99 100644
--- a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java
+++ b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java
@@ -62,6 +62,8 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri
public PythonScriptEngineService(Settings settings) {
super(settings);
+ deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead.");
+
// classloader created here
final SecurityManager sm = System.getSecurityManager();
if (sm != null) {
@@ -118,11 +120,15 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri
@Override
public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> vars) {
+ deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead");
+
return new PythonExecutableScript((PyCode) compiledScript.compiled(), vars);
}
@Override
public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
+ deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead");
+
return new SearchScript() {
@Override
public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
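
The pattern above warns once when the deprecated engine is constructed and again on each entry point (`executable`, `search`), so the warning surfaces both at plugin load and at use time. A minimal stand-in, assuming nothing beyond the JDK: Elasticsearch's DeprecationLogger routes these messages to a dedicated deprecation logger, and here a plain java.util.logging.Logger plays that role (the class and method names are illustrative, not this commit's API):

    import java.util.logging.Logger;

    public class DeprecationSketch {
        private static final Logger deprecationLogger = Logger.getLogger("deprecation.python");

        // Warn once when the deprecated engine is constructed ...
        public DeprecationSketch() {
            deprecationLogger.warning("[python] scripts are deprecated, use [painless] scripts instead");
        }

        // ... and again on each entry point, so existing callers see the
        // warning every time they actually rely on the deprecated path.
        public Object executable(Object compiled) {
            deprecationLogger.warning("[python] scripts are deprecated, use [painless] scripts instead");
            return compiled;
        }
    }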
diff --git a/plugins/mapper-attachments/build.gradle b/plugins/mapper-attachments/build.gradle
deleted file mode 100644
index b00f61867e..0000000000
--- a/plugins/mapper-attachments/build.gradle
+++ /dev/null
@@ -1,2051 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-esplugin {
- description 'The mapper attachments plugin adds the attachment type to Elasticsearch using Apache Tika.'
- classname 'org.elasticsearch.mapper.attachments.MapperAttachmentsPlugin'
-}
-
-versions << [
- 'tika': '1.13',
- 'pdfbox': '2.0.1',
- 'bouncycastle': '1.54',
- 'poi': '3.15-beta1'
-]
-
-dependencies {
- // mandatory for tika
- compile "org.apache.tika:tika-core:${versions.tika}"
- compile "org.apache.tika:tika-parsers:${versions.tika}"
- compile 'commons-io:commons-io:2.4'
-
- // character set detection
- compile 'com.googlecode.juniversalchardet:juniversalchardet:1.0.3'
-
- // external parser libraries
- // HTML
- compile 'org.ccil.cowan.tagsoup:tagsoup:1.2.1'
- // Adobe PDF
- compile "org.apache.pdfbox:pdfbox:${versions.pdfbox}"
- compile "org.apache.pdfbox:fontbox:${versions.pdfbox}"
- compile "org.apache.pdfbox:jempbox:1.8.12"
- compile "commons-logging:commons-logging:${versions.commonslogging}"
- compile "org.bouncycastle:bcmail-jdk15on:${versions.bouncycastle}"
- compile "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}"
- compile "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}"
- // OpenOffice
- compile "org.apache.poi:poi-ooxml:${versions.poi}"
- compile "org.apache.poi:poi:${versions.poi}"
- compile "org.apache.poi:poi-ooxml-schemas:${versions.poi}"
- compile "commons-codec:commons-codec:${versions.commonscodec}"
- compile 'org.apache.xmlbeans:xmlbeans:2.6.0'
- // MS Office
- compile "org.apache.poi:poi-scratchpad:${versions.poi}"
- // Apple iWork
- compile 'org.apache.commons:commons-compress:1.10'
-}
-
-// TODO: stop using LanguageIdentifier...
-compileJava.options.compilerArgs << "-Xlint:-deprecation"
-
-forbiddenPatterns {
- exclude '**/*.docx'
- exclude '**/*.pdf'
- exclude '**/*.epub'
-}
-
-thirdPartyAudit.excludes = [
- // classes are missing: some due to our whitelisting of parsers
- 'com.coremedia.iso.IsoFile',
- 'com.coremedia.iso.boxes.Box',
- 'com.coremedia.iso.boxes.Container',
- 'com.coremedia.iso.boxes.FileTypeBox',
- 'com.coremedia.iso.boxes.MetaBox',
- 'com.coremedia.iso.boxes.MovieBox',
- 'com.coremedia.iso.boxes.MovieHeaderBox',
- 'com.coremedia.iso.boxes.SampleTableBox',
- 'com.coremedia.iso.boxes.TrackBox',
- 'com.coremedia.iso.boxes.TrackHeaderBox',
- 'com.coremedia.iso.boxes.UserDataBox',
- 'com.coremedia.iso.boxes.apple.AppleItemListBox',
- 'com.coremedia.iso.boxes.sampleentry.AudioSampleEntry',
- 'com.drew.imaging.jpeg.JpegMetadataReader',
- 'com.drew.imaging.tiff.TiffMetadataReader',
- 'com.drew.imaging.webp.WebpMetadataReader',
- 'com.drew.lang.ByteArrayReader',
- 'com.drew.lang.GeoLocation',
- 'com.drew.lang.Rational',
- 'com.drew.metadata.Directory',
- 'com.drew.metadata.Metadata',
- 'com.drew.metadata.Tag',
- 'com.drew.metadata.exif.ExifIFD0Directory',
- 'com.drew.metadata.exif.ExifReader',
- 'com.drew.metadata.exif.ExifSubIFDDirectory',
- 'com.drew.metadata.exif.ExifThumbnailDirectory',
- 'com.drew.metadata.exif.GpsDirectory',
- 'com.drew.metadata.iptc.IptcDirectory',
- 'com.drew.metadata.jpeg.JpegCommentDirectory',
- 'com.drew.metadata.jpeg.JpegDirectory',
- 'com.github.junrar.Archive',
- 'com.github.junrar.rarfile.FileHeader',
- 'com.google.common.reflect.TypeToken',
- 'com.google.gson.Gson',
- 'com.googlecode.mp4parser.DataSource',
- 'com.googlecode.mp4parser.boxes.apple.AppleAlbumBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleArtist2Box',
- 'com.googlecode.mp4parser.boxes.apple.AppleArtistBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleCommentBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleCompilationBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleDiskNumberBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleEncoderBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleGenreBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleNameBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleRecordingYear2Box',
- 'com.googlecode.mp4parser.boxes.apple.AppleTrackAuthorBox',
- 'com.googlecode.mp4parser.boxes.apple.AppleTrackNumberBox',
- 'com.googlecode.mp4parser.boxes.apple.Utf8AppleDataBox',
- 'com.googlecode.mp4parser.util.CastUtils',
- 'com.graphbuilder.curve.ControlPath',
- 'com.graphbuilder.curve.GroupIterator',
- 'com.graphbuilder.curve.NURBSpline',
- 'com.graphbuilder.curve.ShapeMultiPath',
- 'com.graphbuilder.curve.ValueVector',
- 'com.graphbuilder.geom.PointFactory',
- 'com.healthmarketscience.jackcess.Column',
- 'com.healthmarketscience.jackcess.CryptCodecProvider',
- 'com.healthmarketscience.jackcess.DataType',
- 'com.healthmarketscience.jackcess.Database',
- 'com.healthmarketscience.jackcess.DatabaseBuilder',
- 'com.healthmarketscience.jackcess.PropertyMap$Property',
- 'com.healthmarketscience.jackcess.PropertyMap',
- 'com.healthmarketscience.jackcess.Row',
- 'com.healthmarketscience.jackcess.Table',
- 'com.healthmarketscience.jackcess.query.Query',
- 'com.healthmarketscience.jackcess.util.LinkResolver',
- 'com.healthmarketscience.jackcess.util.OleBlob$CompoundContent',
- 'com.healthmarketscience.jackcess.util.OleBlob$Content',
- 'com.healthmarketscience.jackcess.util.OleBlob$ContentType',
- 'com.healthmarketscience.jackcess.util.OleBlob$LinkContent',
- 'com.healthmarketscience.jackcess.util.OleBlob$OtherContent',
- 'com.healthmarketscience.jackcess.util.OleBlob$SimplePackageContent',
- 'com.healthmarketscience.jackcess.util.OleBlob',
- 'com.healthmarketscience.jackcess.util.TableIterableBuilder',
- 'com.jmatio.io.MatFileHeader',
- 'com.jmatio.io.MatFileReader',
- 'com.jmatio.types.MLArray',
- 'com.jmatio.types.MLStructure',
- 'com.microsoft.schemas.office.excel.STCF',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1Accel2List',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1AccelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1AnchorList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1AutoFillList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1AutoLineList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1AutoPictList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1AutoScaleList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1CFList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1CameraList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1CancelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1CheckedList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ColHiddenList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ColoredList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ColumnList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DDEList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DefaultList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DefaultSizeList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DisabledList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DismissList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DropLinesList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DropStyleList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1DxList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1FirstButtonList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1FmlaGroupList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1FmlaLinkList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1FmlaMacroList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1FmlaPictList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1FmlaRangeList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1FmlaTxbxList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1HelpList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1HorizList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1IncList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1JustLastXList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1LCTList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ListItemList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1LockTextList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1LockedList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1MapOCXList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1MaxList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1MinList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1MoveWithCellsList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1MultiLineList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1MultiSelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1NoThreeD2List',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1NoThreeDList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1PageList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1PrintObjectList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1RecalcAlwaysList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1RowHiddenList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1RowList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ScriptExtendedList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ScriptLanguageList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ScriptLocationList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ScriptTextList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1SecretEditList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1SelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1SelTypeList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1SizeWithCellsList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1TextHAlignList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1TextVAlignList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1UIObjList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1VScrollList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1VTEditList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ValList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ValidIdsList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1VisibleList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1WidthMinList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2Accel2List',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2AccelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2AnchorList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2AutoFillList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2AutoLineList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2AutoPictList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2AutoScaleList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2CFList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2CameraList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2CancelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2CheckedList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ColHiddenList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ColoredList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ColumnList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DDEList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DefaultList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DefaultSizeList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DisabledList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DismissList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DropLinesList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DropStyleList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2DxList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2FirstButtonList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2FmlaGroupList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2FmlaLinkList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2FmlaMacroList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2FmlaPictList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2FmlaRangeList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2FmlaTxbxList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2HelpList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2HorizList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2IncList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2JustLastXList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2LCTList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ListItemList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2LockTextList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2LockedList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2MapOCXList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2MaxList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2MinList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2MoveWithCellsList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2MultiLineList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2MultiSelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2NoThreeD2List',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2NoThreeDList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2PageList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2PrintObjectList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2RecalcAlwaysList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2RowHiddenList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2RowList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ScriptExtendedList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ScriptLanguageList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ScriptLocationList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ScriptTextList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2SecretEditList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2SelList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2SelTypeList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2SizeWithCellsList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2TextHAlignList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2TextVAlignList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2UIObjList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2VScrollList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2VTEditList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ValList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2ValidIdsList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2VisibleList',
- 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2WidthMinList',
- 'com.microsoft.schemas.office.office.CTCallout',
- 'com.microsoft.schemas.office.office.CTClipPath',
- 'com.microsoft.schemas.office.office.CTComplex',
- 'com.microsoft.schemas.office.office.CTDiagram',
- 'com.microsoft.schemas.office.office.CTExtrusion',
- 'com.microsoft.schemas.office.office.CTFill',
- 'com.microsoft.schemas.office.office.CTInk',
- 'com.microsoft.schemas.office.office.CTRegroupTable',
- 'com.microsoft.schemas.office.office.CTRules',
- 'com.microsoft.schemas.office.office.CTSignatureLine',
- 'com.microsoft.schemas.office.office.CTSkew',
- 'com.microsoft.schemas.office.office.CTStrokeChild',
- 'com.microsoft.schemas.office.office.STBWMode',
- 'com.microsoft.schemas.office.office.STConnectorType',
- 'com.microsoft.schemas.office.office.STHrAlign',
- 'com.microsoft.schemas.office.office.STRelationshipId',
- 'com.microsoft.schemas.office.office.STTrueFalse',
- 'com.microsoft.schemas.office.office.STTrueFalseBlank',
- 'com.microsoft.schemas.office.powerpoint.CTEmpty',
- 'com.microsoft.schemas.office.powerpoint.CTRel',
- 'com.microsoft.schemas.office.visio.x2012.main.AttachedToolbarsType',
- 'com.microsoft.schemas.office.visio.x2012.main.ColorsType',
- 'com.microsoft.schemas.office.visio.x2012.main.ConnectType',
- 'com.microsoft.schemas.office.visio.x2012.main.ConnectsType',
- 'com.microsoft.schemas.office.visio.x2012.main.CpType',
- 'com.microsoft.schemas.office.visio.x2012.main.CustomMenusFileType',
- 'com.microsoft.schemas.office.visio.x2012.main.CustomToolbarsFileType',
- 'com.microsoft.schemas.office.visio.x2012.main.DataType',
- 'com.microsoft.schemas.office.visio.x2012.main.DocumentSheetType',
- 'com.microsoft.schemas.office.visio.x2012.main.DynamicGridEnabledType',
- 'com.microsoft.schemas.office.visio.x2012.main.EventListType',
- 'com.microsoft.schemas.office.visio.x2012.main.FaceNamesType',
- 'com.microsoft.schemas.office.visio.x2012.main.FldType',
- 'com.microsoft.schemas.office.visio.x2012.main.ForeignDataType',
- 'com.microsoft.schemas.office.visio.x2012.main.GlueSettingsType',
- 'com.microsoft.schemas.office.visio.x2012.main.HeaderFooterType',
- 'com.microsoft.schemas.office.visio.x2012.main.IconType',
- 'com.microsoft.schemas.office.visio.x2012.main.MasterShortcutType',
- 'com.microsoft.schemas.office.visio.x2012.main.PpType',
- 'com.microsoft.schemas.office.visio.x2012.main.ProtectBkgndsType',
- 'com.microsoft.schemas.office.visio.x2012.main.ProtectMastersType',
- 'com.microsoft.schemas.office.visio.x2012.main.ProtectShapesType',
- 'com.microsoft.schemas.office.visio.x2012.main.ProtectStylesType',
- 'com.microsoft.schemas.office.visio.x2012.main.PublishSettingsType',
- 'com.microsoft.schemas.office.visio.x2012.main.RefByType',
- 'com.microsoft.schemas.office.visio.x2012.main.SnapAnglesType',
- 'com.microsoft.schemas.office.visio.x2012.main.SnapExtensionsType',
- 'com.microsoft.schemas.office.visio.x2012.main.SnapSettingsType',
- 'com.microsoft.schemas.office.visio.x2012.main.TpType',
- 'com.microsoft.schemas.office.visio.x2012.main.TriggerType',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.CellTypeImpl$1RefByList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.MastersTypeImpl$1MasterList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.MastersTypeImpl$1MasterShortcutList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.PagesTypeImpl$1PageList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.RowTypeImpl$1CellList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.RowTypeImpl$1TriggerList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.SectionTypeImpl$1CellList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.SectionTypeImpl$1RowList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.SectionTypeImpl$1TriggerList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.ShapesTypeImpl$1ShapeList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.SheetTypeImpl$1CellList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.SheetTypeImpl$1SectionList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.SheetTypeImpl$1TriggerList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.StyleSheetsTypeImpl$1StyleSheetList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.TextTypeImpl$1CpList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.TextTypeImpl$1FldList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.TextTypeImpl$1PpList',
- 'com.microsoft.schemas.office.visio.x2012.main.impl.TextTypeImpl$1TpList',
- 'com.microsoft.schemas.office.word.CTAnchorLock',
- 'com.microsoft.schemas.office.word.CTBorder',
- 'com.microsoft.schemas.office.word.CTWrap',
- 'com.microsoft.schemas.office.x2006.digsig.STPositiveInteger',
- 'com.microsoft.schemas.office.x2006.digsig.STSignatureComments',
- 'com.microsoft.schemas.office.x2006.digsig.STSignatureProviderUrl',
- 'com.microsoft.schemas.office.x2006.digsig.STSignatureText',
- 'com.microsoft.schemas.office.x2006.digsig.STSignatureType',
- 'com.microsoft.schemas.office.x2006.digsig.STUniqueIdentifierWithBraces',
- 'com.microsoft.schemas.office.x2006.digsig.STVersion',
- 'com.microsoft.schemas.vml.CTArc',
- 'com.microsoft.schemas.vml.CTCurve',
- 'com.microsoft.schemas.vml.CTImage',
- 'com.microsoft.schemas.vml.CTImageData',
- 'com.microsoft.schemas.vml.CTLine',
- 'com.microsoft.schemas.vml.CTOval',
- 'com.microsoft.schemas.vml.CTPolyLine',
- 'com.microsoft.schemas.vml.CTRect',
- 'com.microsoft.schemas.vml.CTRoundRect',
- 'com.microsoft.schemas.vml.STEditAs',
- 'com.microsoft.schemas.vml.STFillMethod',
- 'com.microsoft.schemas.vml.STFillType',
- 'com.microsoft.schemas.vml.STImageAspect',
- 'com.microsoft.schemas.vml.STShadowType',
- 'com.microsoft.schemas.vml.STStrokeArrowLength',
- 'com.microsoft.schemas.vml.STStrokeArrowType',
- 'com.microsoft.schemas.vml.STStrokeArrowWidth',
- 'com.microsoft.schemas.vml.STStrokeEndCap',
- 'com.microsoft.schemas.vml.STStrokeLineStyle',
- 'com.microsoft.schemas.vml.STTrueFalseBlank',
- 'com.microsoft.schemas.vml.impl.CTFormulasImpl$1FList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1AnchorlockList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ArcList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1BorderbottomList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1BorderleftList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1BorderrightList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1BordertopList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1CalloutList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ClientDataList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ClippathList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1CurveList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1DiagramList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ExtrusionList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1FillList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1FormulasList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1GroupList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1HandlesList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ImageList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ImagedataList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1LineList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1LockList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1OvalList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1PathList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1PolylineList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1RectList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1RoundrectList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ShadowList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ShapeList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1ShapetypeList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1SignaturelineList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1SkewList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1StrokeList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1TextboxList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1TextdataList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1TextpathList',
- 'com.microsoft.schemas.vml.impl.CTGroupImpl$1WrapList',
- 'com.microsoft.schemas.vml.impl.CTHandlesImpl$1HList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1AnchorlockList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1BorderbottomList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1BorderleftList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1BorderrightList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1BordertopList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1CalloutList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1ClippathList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1ExtrusionList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1FillList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1FormulasList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1HandlesList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1ImagedataList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1InkList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1IscommentList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1LockList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1PathList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1ShadowList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1SignaturelineList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1SkewList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1StrokeList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1TextboxList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1TextdataList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1TextpathList',
- 'com.microsoft.schemas.vml.impl.CTShapeImpl$1WrapList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1AnchorlockList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1BorderbottomList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1BorderleftList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1BorderrightList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1BordertopList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1CalloutList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1ClientDataList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1ClippathList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1ExtrusionList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1FillList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1FormulasList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1HandlesList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1ImagedataList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1LockList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1PathList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1ShadowList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1SignaturelineList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1SkewList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1StrokeList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1TextboxList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1TextdataList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1TextpathList',
- 'com.microsoft.schemas.vml.impl.CTShapetypeImpl$1WrapList',
- 'com.pff.PSTAttachment',
- 'com.pff.PSTFile',
- 'com.pff.PSTFolder',
- 'com.pff.PSTMessage',
- 'com.rometools.rome.feed.synd.SyndContent',
- 'com.rometools.rome.feed.synd.SyndEntry',
- 'com.rometools.rome.feed.synd.SyndFeed',
- 'com.rometools.rome.io.SyndFeedInput',
- 'com.uwyn.jhighlight.renderer.Renderer',
- 'com.uwyn.jhighlight.renderer.XhtmlRendererFactory',
- 'de.l3s.boilerpipe.BoilerpipeExtractor',
- 'de.l3s.boilerpipe.document.TextBlock',
- 'de.l3s.boilerpipe.document.TextDocument',
- 'de.l3s.boilerpipe.extractors.DefaultExtractor',
- 'de.l3s.boilerpipe.sax.BoilerpipeHTMLContentHandler',
- 'javax.mail.BodyPart',
- 'javax.mail.Header',
- 'javax.mail.Message$RecipientType',
- 'javax.mail.MessagingException',
- 'javax.mail.Multipart',
- 'javax.mail.Part',
- 'javax.mail.Session',
- 'javax.mail.Transport',
- 'javax.mail.internet.ContentType',
- 'javax.mail.internet.InternetAddress',
- 'javax.mail.internet.InternetHeaders',
- 'javax.mail.internet.MimeBodyPart',
- 'javax.mail.internet.MimeMessage',
- 'javax.mail.internet.MimeMultipart',
- 'javax.mail.internet.MimePart',
- 'javax.mail.internet.SharedInputStream',
- 'javax.servlet.ServletContextEvent',
- 'javax.servlet.ServletContextListener',
- 'javax.ws.rs.core.Response',
- 'junit.framework.TestCase',
- 'opennlp.tools.namefind.NameFinderME',
- 'opennlp.tools.namefind.TokenNameFinderModel',
- 'opennlp.tools.util.Span',
- 'org.apache.avalon.framework.logger.Logger',
- 'org.apache.commons.csv.CSVFormat',
- 'org.apache.commons.csv.CSVParser',
- 'org.apache.commons.csv.CSVRecord',
- 'org.apache.commons.exec.CommandLine',
- 'org.apache.commons.exec.DefaultExecutor',
- 'org.apache.commons.exec.ExecuteWatchdog',
- 'org.apache.commons.exec.PumpStreamHandler',
- 'org.apache.commons.exec.environment.EnvironmentUtils',
- 'org.apache.ctakes.typesystem.type.refsem.UmlsConcept',
- 'org.apache.ctakes.typesystem.type.textsem.IdentifiedAnnotation',
- 'org.apache.cxf.jaxrs.client.WebClient',
- 'org.apache.cxf.jaxrs.ext.multipart.Attachment',
- 'org.apache.cxf.jaxrs.ext.multipart.ContentDisposition',
- 'org.apache.cxf.jaxrs.ext.multipart.MultipartBody',
- 'org.apache.http.HttpEntity',
- 'org.apache.http.HttpResponse',
- 'org.apache.http.StatusLine',
- 'org.apache.http.client.HttpClient',
- 'org.apache.http.client.methods.HttpGet',
- 'org.apache.http.client.utils.URIBuilder',
- 'org.apache.http.impl.client.DefaultHttpClient',
- 'org.apache.james.mime4j.MimeException',
- 'org.apache.james.mime4j.codec.DecodeMonitor',
- 'org.apache.james.mime4j.codec.DecoderUtil',
- 'org.apache.james.mime4j.dom.FieldParser',
- 'org.apache.james.mime4j.dom.address.Address',
- 'org.apache.james.mime4j.dom.address.AddressList',
- 'org.apache.james.mime4j.dom.address.Mailbox',
- 'org.apache.james.mime4j.dom.address.MailboxList',
- 'org.apache.james.mime4j.dom.field.AddressListField',
- 'org.apache.james.mime4j.dom.field.DateTimeField',
- 'org.apache.james.mime4j.dom.field.MailboxListField',
- 'org.apache.james.mime4j.dom.field.ParsedField',
- 'org.apache.james.mime4j.dom.field.UnstructuredField',
- 'org.apache.james.mime4j.field.LenientFieldParser',
- 'org.apache.james.mime4j.parser.ContentHandler',
- 'org.apache.james.mime4j.parser.MimeStreamParser',
- 'org.apache.james.mime4j.stream.BodyDescriptor',
- 'org.apache.james.mime4j.stream.Field',
- 'org.apache.james.mime4j.stream.MimeConfig',
- 'org.apache.jcp.xml.dsig.internal.dom.DOMDigestMethod',
- 'org.apache.jcp.xml.dsig.internal.dom.DOMKeyInfo',
- 'org.apache.jcp.xml.dsig.internal.dom.DOMReference',
- 'org.apache.jcp.xml.dsig.internal.dom.DOMSignedInfo',
- 'org.apache.log.Hierarchy',
- 'org.apache.log.Logger',
- 'org.apache.pdfbox.tools.imageio.ImageIOUtil',
- 'org.apache.sis.internal.util.CheckedArrayList',
- 'org.apache.sis.internal.util.CheckedHashSet',
- 'org.apache.sis.metadata.iso.DefaultMetadata',
- 'org.apache.sis.metadata.iso.DefaultMetadataScope',
- 'org.apache.sis.metadata.iso.constraint.DefaultLegalConstraints',
- 'org.apache.sis.metadata.iso.extent.DefaultGeographicBoundingBox',
- 'org.apache.sis.metadata.iso.extent.DefaultGeographicDescription',
- 'org.apache.sis.metadata.iso.identification.DefaultDataIdentification',
- 'org.apache.sis.storage.DataStore',
- 'org.apache.sis.storage.DataStores',
- 'org.apache.sis.util.collection.CodeListSet',
- 'org.apache.tools.ant.BuildException',
- 'org.apache.tools.ant.FileScanner',
- 'org.apache.tools.ant.Project',
- 'org.apache.tools.ant.taskdefs.Jar',
- 'org.apache.tools.ant.taskdefs.Javac',
- 'org.apache.tools.ant.taskdefs.MatchingTask',
- 'org.apache.tools.ant.types.FileSet',
- 'org.apache.tools.ant.types.Path$PathElement',
- 'org.apache.tools.ant.types.Path',
- 'org.apache.tools.ant.types.Reference',
- 'org.apache.uima.UIMAFramework',
- 'org.apache.uima.analysis_engine.AnalysisEngine',
- 'org.apache.uima.cas.Type',
- 'org.apache.uima.cas.impl.XCASSerializer',
- 'org.apache.uima.cas.impl.XmiCasSerializer',
- 'org.apache.uima.cas.impl.XmiSerializationSharedData',
- 'org.apache.uima.fit.util.JCasUtil',
- 'org.apache.uima.jcas.JCas',
- 'org.apache.uima.jcas.cas.FSArray',
- 'org.apache.uima.util.XMLInputSource',
- 'org.apache.uima.util.XMLParser',
- 'org.apache.uima.util.XmlCasSerializer',
- 'org.apache.xml.security.Init',
- 'org.apache.xml.security.c14n.Canonicalizer',
- 'org.apache.xml.security.utils.Base64',
- 'org.etsi.uri.x01903.v13.AnyType',
- 'org.etsi.uri.x01903.v13.ClaimedRolesListType',
- 'org.etsi.uri.x01903.v13.CounterSignatureType',
- 'org.etsi.uri.x01903.v13.DataObjectFormatType$Factory',
- 'org.etsi.uri.x01903.v13.DataObjectFormatType',
- 'org.etsi.uri.x01903.v13.IdentifierType',
- 'org.etsi.uri.x01903.v13.IncludeType',
- 'org.etsi.uri.x01903.v13.ObjectIdentifierType',
- 'org.etsi.uri.x01903.v13.OtherCertStatusRefsType',
- 'org.etsi.uri.x01903.v13.OtherCertStatusValuesType',
- 'org.etsi.uri.x01903.v13.ReferenceInfoType',
- 'org.etsi.uri.x01903.v13.SigPolicyQualifiersListType',
- 'org.etsi.uri.x01903.v13.SignaturePolicyIdType',
- 'org.etsi.uri.x01903.v13.SignatureProductionPlaceType',
- 'org.etsi.uri.x01903.v13.SignedDataObjectPropertiesType',
- 'org.etsi.uri.x01903.v13.SignerRoleType',
- 'org.etsi.uri.x01903.v13.UnsignedDataObjectPropertiesType',
- 'org.etsi.uri.x01903.v13.impl.CRLRefsTypeImpl$1CRLRefList',
- 'org.etsi.uri.x01903.v13.impl.CRLValuesTypeImpl$1EncapsulatedCRLValueList',
- 'org.etsi.uri.x01903.v13.impl.CertIDListTypeImpl$1CertList',
- 'org.etsi.uri.x01903.v13.impl.CertificateValuesTypeImpl$1EncapsulatedX509CertificateList',
- 'org.etsi.uri.x01903.v13.impl.CertificateValuesTypeImpl$1OtherCertificateList',
- 'org.etsi.uri.x01903.v13.impl.GenericTimeStampTypeImpl$1EncapsulatedTimeStampList',
- 'org.etsi.uri.x01903.v13.impl.GenericTimeStampTypeImpl$1IncludeList',
- 'org.etsi.uri.x01903.v13.impl.GenericTimeStampTypeImpl$1ReferenceInfoList',
- 'org.etsi.uri.x01903.v13.impl.GenericTimeStampTypeImpl$1XMLTimeStampList',
- 'org.etsi.uri.x01903.v13.impl.OCSPRefsTypeImpl$1OCSPRefList',
- 'org.etsi.uri.x01903.v13.impl.OCSPValuesTypeImpl$1EncapsulatedOCSPValueList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1ArchiveTimeStampList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1AttrAuthoritiesCertValuesList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1AttributeCertificateRefsList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1AttributeRevocationRefsList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1AttributeRevocationValuesList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1CertificateValuesList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1CompleteCertificateRefsList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1CompleteRevocationRefsList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1CounterSignatureList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1RefsOnlyTimeStampList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1RevocationValuesList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1SigAndRefsTimeStampList',
- 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1SignatureTimeStampList',
- 'org.etsi.uri.x01903.v14.ValidationDataType$Factory',
- 'org.etsi.uri.x01903.v14.ValidationDataType',
- 'org.json.JSONArray',
- 'org.json.JSONObject',
- 'org.json.XML',
- 'org.json.simple.JSONArray',
- 'org.json.simple.JSONObject',
- 'org.json.simple.parser.JSONParser',
- 'org.junit.Test',
- 'org.junit.internal.TextListener',
- 'org.junit.runner.JUnitCore',
- 'org.junit.runner.Result',
- 'org.objectweb.asm.AnnotationVisitor',
- 'org.objectweb.asm.Attribute',
- 'org.objectweb.asm.ClassReader',
- 'org.objectweb.asm.ClassVisitor',
- 'org.objectweb.asm.FieldVisitor',
- 'org.objectweb.asm.MethodVisitor',
- 'org.objectweb.asm.Type',
- 'org.opengis.metadata.Identifier',
- 'org.opengis.metadata.citation.Address',
- 'org.opengis.metadata.citation.Citation',
- 'org.opengis.metadata.citation.CitationDate',
- 'org.opengis.metadata.citation.Contact',
- 'org.opengis.metadata.citation.DateType',
- 'org.opengis.metadata.citation.OnLineFunction',
- 'org.opengis.metadata.citation.OnlineResource',
- 'org.opengis.metadata.citation.ResponsibleParty',
- 'org.opengis.metadata.citation.Role',
- 'org.opengis.metadata.constraint.Restriction',
- 'org.opengis.metadata.distribution.DigitalTransferOptions',
- 'org.opengis.metadata.distribution.Distribution',
- 'org.opengis.metadata.distribution.Distributor',
- 'org.opengis.metadata.distribution.Format',
- 'org.opengis.metadata.extent.Extent',
- 'org.opengis.metadata.identification.Identification',
- 'org.opengis.metadata.identification.KeywordType',
- 'org.opengis.metadata.identification.Keywords',
- 'org.opengis.metadata.identification.Progress',
- 'org.opengis.metadata.identification.TopicCategory',
- 'org.opengis.metadata.maintenance.ScopeCode',
- 'org.opengis.util.InternationalString',
-
- // Missing openxml schema classes are explained by the fact we use the smaller jar:
- // "The full jar of all of the schemas is ooxml-schemas-xx.jar, and it is currently around 15mb.
- // The smaller poi-ooxml-schemas jar is only about 4mb.
- // This latter jar file only contains the typically used parts though."
- // http://poi.apache.org/faq.html#faq-N10025
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTArea3DChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTAreaChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTAxisUnit',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTBar3DChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTBarChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTBubbleChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTChartLines',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTDLbls',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTDPt',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTDTable',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTDateAx',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTDispBlanksAs',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTDispUnits',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTDoughnutChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTErrBars',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTExtensionList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTExternalData',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTFirstSliceAng',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTGrouping',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTLblAlgn',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTLblOffset',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTLegendEntry',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTLine3DChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTMarkerSize',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTMultiLvlStrRef',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTOfPieChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTPie3DChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTPivotFmts',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTPivotSource',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTProtection',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTRadarChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTRelId',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTSerAx',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTSkip',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTStockChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTStyle',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTSurface',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTSurface3DChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTSurfaceChart',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTTextLanguageID',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTTrendline',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTUpDownBars',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.CTView3D',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.STPageSetupOrientation',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTLegendImpl$1LegendEntryList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTLineChartImpl$1AxIdList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTLineChartImpl$1SerList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTLineSerImpl$1DPtList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTLineSerImpl$1TrendlineList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTNumDataImpl$1PtList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPieChartImpl$1SerList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPieSerImpl$1DPtList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1Area3DChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1AreaChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1Bar3DChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1BarChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1BubbleChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1CatAxList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1DateAxList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1DoughnutChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1Line3DChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1LineChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1OfPieChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1Pie3DChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1PieChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1RadarChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1ScatterChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1SerAxList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1StockChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1Surface3DChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1SurfaceChartList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTPlotAreaImpl$1ValAxList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTScatterChartImpl$1AxIdList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTScatterChartImpl$1SerList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTScatterSerImpl$1DPtList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTScatterSerImpl$1ErrBarsList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTScatterSerImpl$1TrendlineList',
- 'org.openxmlformats.schemas.drawingml.x2006.chart.impl.CTStrDataImpl$1PtList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAlphaBiLevelEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAlphaCeilingEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAlphaFloorEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAlphaInverseEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAlphaModulateEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAlphaReplaceEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAngle',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAudioCD',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTAudioFile',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTBiLevelEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTBlurEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTCell3D',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTColorChangeEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTColorReplaceEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTColorSchemeList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTComplementTransform',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTConnectionSite',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTConnectorLocking',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTCustomColorList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTDashStopList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTDuotoneEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTEffectContainer',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTEmbeddedWAVAudioFile',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTFillOverlayEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTFlatText',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTGammaTransform',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTGlowEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTGrayscaleEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTGrayscaleTransform',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTGroupFillProperties',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTGroupLocking',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTHSLEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTInnerShadowEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTInverseGammaTransform',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTInverseTransform',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTLineJoinBevel',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTLuminanceEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTObjectStyleDefaults',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTPath2DArcTo',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTPatternFillProperties',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTPolarAdjustHandle',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTPositiveFixedAngle',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTPresetShadowEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTPresetTextShape',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTQuickTimeFile',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTReflectionEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTScene3D',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTShape3D',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTShapeLocking',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTSoftEdgesEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTSupplementalFont',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTableBackgroundStyle',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTablePartStyle',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBlipBullet',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBulletColorFollowText',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBulletSizeFollowText',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBulletTypefaceFollowText',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextUnderlineFillFollowText',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextUnderlineFillGroupWrapper',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextUnderlineLineFollowText',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTileInfoProperties',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTTintEffect',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTVideoFile',
- 'org.openxmlformats.schemas.drawingml.x2006.main.CTXYAdjustHandle',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STBlackWhiteMode',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STBlipCompression',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STFixedAngle',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STGuid',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STPanose',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STPathFillMode',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STRectAlignment',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STTextColumnCount',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STTextNonNegativePoint',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STTextTabAlignType',
- 'org.openxmlformats.schemas.drawingml.x2006.main.STTileFlipMode',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTAdjustHandleListImpl$1AhPolarList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTAdjustHandleListImpl$1AhXYList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBackgroundFillStyleListImpl$1BlipFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBackgroundFillStyleListImpl$1GradFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBackgroundFillStyleListImpl$1GrpFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBackgroundFillStyleListImpl$1NoFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBackgroundFillStyleListImpl$1PattFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBackgroundFillStyleListImpl$1SolidFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1AlphaBiLevelList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1AlphaCeilingList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1AlphaFloorList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1AlphaInvList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1AlphaModFixList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1AlphaModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1AlphaReplList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1BiLevelList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1BlurList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1ClrChangeList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1ClrReplList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1DuotoneList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1FillOverlayList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1GraysclList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1HslList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1LumList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTBlipImpl$1TintList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTConnectionSiteListImpl$1CxnList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTEffectStyleListImpl$1EffectStyleList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTFillStyleListImpl$1BlipFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTFillStyleListImpl$1GradFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTFillStyleListImpl$1GrpFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTFillStyleListImpl$1NoFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTFillStyleListImpl$1PattFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTFillStyleListImpl$1SolidFillList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTFontCollectionImpl$1FontList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTGeomGuideListImpl$1GdList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTGradientStopListImpl$1GsList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1AlphaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1AlphaModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1AlphaOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1BlueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1BlueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1BlueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1CompList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1GammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1GrayList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1GreenList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1GreenModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1GreenOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1HueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1HueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1HueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1InvGammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1InvList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1LumList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1LumModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1LumOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1RedList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1RedModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1RedOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1SatList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1SatModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1SatOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1ShadeList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTHslColorImpl$1TintList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTLineStyleListImpl$1LnList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTOfficeArtExtensionListImpl$1ExtList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DCubicBezierToImpl$1PtList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DImpl$1ArcToList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DImpl$1CloseList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DImpl$1CubicBezToList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DImpl$1LnToList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DImpl$1MoveToList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DImpl$1QuadBezToList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPath2DListImpl$1PathList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1AlphaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1AlphaModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1AlphaOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1BlueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1BlueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1BlueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1CompList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1GammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1GrayList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1GreenList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1GreenModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1GreenOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1HueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1HueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1HueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1InvGammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1InvList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1LumList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1LumModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1LumOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1RedList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1RedModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1RedOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1SatList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1SatModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1SatOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1ShadeList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1TintList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1AlphaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1AlphaModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1AlphaOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1BlueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1BlueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1BlueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1CompList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1GammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1GrayList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1GreenList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1GreenModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1GreenOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1HueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1HueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1HueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1InvGammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1InvList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1LumList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1LumModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1LumOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1RedList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1RedModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1RedOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1SatList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1SatModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1SatOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1ShadeList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1TintList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1AlphaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1AlphaModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1AlphaOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1BlueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1BlueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1BlueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1CompList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1GammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1GrayList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1GreenList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1GreenModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1GreenOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1HueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1HueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1HueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1InvGammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1InvList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1LumList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1LumModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1LumOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1RedList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1RedModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1RedOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1SatList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1SatModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1SatOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1ShadeList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSchemeColorImpl$1TintList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1AlphaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1AlphaModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1AlphaOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1BlueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1BlueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1BlueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1CompList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1GammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1GrayList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1GreenList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1GreenModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1GreenOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1HueList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1HueModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1HueOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1InvGammaList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1InvList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1LumList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1LumModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1LumOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1RedList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1RedModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1RedOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1SatList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1SatModList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1SatOffList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1ShadeList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSystemColorImpl$1TintList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTableGridImpl$1GridColList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTableImpl$1TrList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTableRowImpl$1TcList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTableStyleListImpl$1TblStyleList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTextBodyImpl$1PList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTextParagraphImpl$1BrList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTextParagraphImpl$1FldList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTextParagraphImpl$1RList',
- 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTTextTabStopListImpl$1TabList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.CTAbsoluteAnchor',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTDrawingImpl$1AbsoluteAnchorList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTDrawingImpl$1OneCellAnchorList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTDrawingImpl$1TwoCellAnchorList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTGroupShapeImpl$1CxnSpList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTGroupShapeImpl$1GraphicFrameList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTGroupShapeImpl$1GrpSpList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTGroupShapeImpl$1PicList',
- 'org.openxmlformats.schemas.drawingml.x2006.spreadsheetDrawing.impl.CTGroupShapeImpl$1SpList',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTEffectExtent',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTPosH',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTPosV',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTWrapNone',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTWrapSquare',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTWrapThrough',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTWrapTight',
- 'org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTWrapTopBottom',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.CTArray',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.CTCf',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.CTEmpty',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.CTNull',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.CTVstream',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.STCy',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.STError',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.STVectorBaseType',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1BoolList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1BstrList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1CfList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1ClsidList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1CyList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1DateList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1ErrorList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1FiletimeList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1I1List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1I2List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1I4List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1I8List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1LpstrList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1LpwstrList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1R4List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1R8List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1Ui1List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1Ui2List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1Ui4List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1Ui8List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$1VariantList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2BoolList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2BstrList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2ClsidList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2CyList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2DateList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2ErrorList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2FiletimeList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2I1List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2I2List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2I4List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2I8List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2LpstrList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2LpwstrList',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2R4List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2R8List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2Ui1List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2Ui2List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2Ui4List',
- 'org.openxmlformats.schemas.officeDocument.x2006.docPropsVTypes.impl.CTVectorImpl$2Ui8List',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTAcc',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTBar',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTBorderBox',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTBox',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTD',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTEqArr',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTF',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTFunc',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTGroupChr',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTLimLow',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTLimUpp',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTM',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTMathPr',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTNary',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTOMath',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTOMathPara',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTPhant',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTR',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTRad',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTSPre',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTSSub',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTSSubSup',
- 'org.openxmlformats.schemas.officeDocument.x2006.math.CTSSup',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTControlList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTCustomShowList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTCustomerData',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTEmbeddedFontList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTExtensionList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTExtensionListModify',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTHandoutMasterIdList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTHeaderFooter',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTKinsoku',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTModifyVerifier',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTPhotoAlbum',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTSlideLayoutIdList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTSlideTiming',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTSlideTransition',
- 'org.openxmlformats.schemas.presentationml.x2006.main.CTSmartTags',
- 'org.openxmlformats.schemas.presentationml.x2006.main.STBookmarkIdSeed',
- 'org.openxmlformats.schemas.presentationml.x2006.main.STDirection',
- 'org.openxmlformats.schemas.presentationml.x2006.main.STIndex',
- 'org.openxmlformats.schemas.presentationml.x2006.main.STPlaceholderSize',
- 'org.openxmlformats.schemas.presentationml.x2006.main.STSlideSizeType',
- 'org.openxmlformats.schemas.presentationml.x2006.main.impl.CTCommentAuthorListImpl$1CmAuthorList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.impl.CTCommentListImpl$1CmList',
- 'org.openxmlformats.schemas.presentationml.x2006.main.impl.CTCustomerDataListImpl$1CustDataList',
- 'org.openxmlformats.schemas.schemaLibrary.x2006.main.CTSchemaLibrary',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTAutoSortScope',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTBoolean',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCacheHierarchies',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCalculatedItems',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCalculatedMembers',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCellStyles',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCellWatches',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTChartFormats',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTChartsheetPr',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTChartsheetProtection',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTChartsheetViews',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTColHierarchiesUsage',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTColItems',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTColors',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTConditionalFormats',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTConsolidation',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTControls',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCsPageSetup',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCustomChartsheetViews',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCustomProperties',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCustomSheetViews',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTCustomWorkbookViews',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTDataBinding',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTDataConsolidate',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTDateTime',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTDdeLink',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTDimensions',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTError',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTExtensionList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTExternalSheetDataSet',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTFieldGroup',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTFileRecoveryPr',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTFileSharing',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTFileVersion',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTFilterColumn',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTFormats',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTFunctionGroups',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTGradientFill',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTMeasureDimensionMaps',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTMeasureGroups',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTMissing',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTNumber',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleLink',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleObjects',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleSize',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPCDKPIs',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPhoneticRun',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPivotFilters',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPivotHierarchies',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPivotSelection',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTProtectedRanges',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTRecord',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTRowHierarchiesUsage',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTRowItems',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTScenarios',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTSheetBackgroundPicture',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTSmartTagPr',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTSmartTagTypes',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTSmartTags',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTSortState',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTString',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTTableFormula',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTTableStyles',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTTupleCache',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTWebPublishItems',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTWebPublishObjects',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTWebPublishing',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTX',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STCellSpans',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STDataValidationImeMode',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STFieldSortType',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STGuid',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STObjects',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STPhoneticAlignment',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STPhoneticType',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STPrintError',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STRefMode',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STSheetViewType',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STShowDataAs',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STTableType',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STTimePeriod',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STTotalsRowFunction',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STUpdateLinks',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STVisibility',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTAuthorsImpl$1AuthorList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTAuthorsImpl$2AuthorList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTAutoFilterImpl$1FilterColumnList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTBookViewsImpl$1WorkbookViewList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTBordersImpl$1BorderList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTCacheFieldImpl$1MpMapList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTCacheFieldsImpl$1CacheFieldList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTCellStyleXfsImpl$1XfList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTCellXfsImpl$1XfList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTCfRuleImpl$1FormulaList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTCfRuleImpl$2FormulaList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTColFieldsImpl$1FieldList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTColorScaleImpl$1CfvoList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTColorScaleImpl$1ColorList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTCommentListImpl$1CommentList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTConditionalFormattingImpl$1CfRuleList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTDataBarImpl$1CfvoList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTDataValidationsImpl$1DataValidationList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTDxfsImpl$1DxfList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTExternalDefinedNamesImpl$1DefinedNameList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTExternalReferencesImpl$1ExternalReferenceList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTExternalSheetNamesImpl$1SheetNameList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFillsImpl$1FillList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1BList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1CharsetList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1ColorList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1CondenseList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1ExtendList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1FamilyList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1IList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1NameList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1OutlineList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1SchemeList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1ShadowList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1StrikeList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1SzList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1UList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontImpl$1VertAlignList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTFontsImpl$1FontList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTHyperlinksImpl$1HyperlinkList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTIconSetImpl$1CfvoList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTItemsImpl$1ItemList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTMapInfoImpl$1MapList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTMapInfoImpl$1SchemaList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTMergeCellsImpl$1MergeCellList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTNumFmtsImpl$1NumFmtList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPageBreakImpl$1BrkList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPageFieldsImpl$1PageFieldList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotCacheRecordsImpl$1RList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotCachesImpl$1PivotCacheList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotFieldsImpl$1PivotFieldList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1BList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1CharsetList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1ColorList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1CondenseList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1ExtendList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1FamilyList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1IList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1OutlineList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1RFontList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1SchemeList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1ShadowList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1StrikeList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1SzList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1UList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRPrEltImpl$1VertAlignList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRowFieldsImpl$1FieldList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRowImpl$1CList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRstImpl$1RList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTRstImpl$1RPhList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1BList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1DList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1EList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1MList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1NList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1SList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetDataImpl$1RowList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetViewImpl$1PivotSelectionList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetViewImpl$1SelectionList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetViewsImpl$1SheetViewList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSingleXmlCellsImpl$1SingleXmlCellList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSstImpl$1SiList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTTableColumnsImpl$1TableColumnList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTTablePartsImpl$1TablePartList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTWorkbookImpl$1FileRecoveryPrList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTWorksheetImpl$1ColsList',
- 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTWorksheetImpl$1ConditionalFormattingList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTAltChunk',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTAttr',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTBackground',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCaptions',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCellMergeTrackChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCharacterSpacing',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCnf',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTColorSchemeMapping',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTColumns',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCompat',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTControl',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCustomXmlBlock',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCustomXmlCell',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCustomXmlRow',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTCustomXmlRun',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTDataBinding',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTDocGrid',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTDocRsids',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTDocType',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTDocVars',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTEastAsianLayout',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTEdnDocProps',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTEdnProps',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTEm',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFFDDList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFFHelpText',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFFName',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFFStatusText',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFFTextInput',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFitText',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFramePr',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFtnDocProps',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTFtnProps',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTKinsoku',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTLevelSuffix',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTLineNumber',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTLock',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTLongHexNumber',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTLvlLegacy',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTMacroName',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTMailMerge',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTMultiLevelType',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTNumPicBullet',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPPrChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPageBorders',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPageMar',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPageNumber',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPageSz',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPaperSource',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTParaRPrChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPerm',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPermStart',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTPlaceholder',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTProof',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTRPrChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTReadingModeInkLockDown',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTRuby',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSaveThroughXslt',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSdtComboBox',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSdtDate',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSdtDropDownList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSdtRow',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSdtText',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSectPrChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSectType',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTShapeDefaults',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTShortHexNumber',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSignedTwipsMeasure',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTSmartTagType',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTblGridChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTblLayoutType',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTblOverlap',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTblPPr',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTblPrChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTblPrExChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTblStylePr',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTcMar',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTcPrChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTextDirection',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTextEffect',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTextScale',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTextboxTightWrap',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTrPrChange',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTrackChangeNumbering',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTrackChangesView',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTTwipsMeasure',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTView',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTWriteProtection',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.CTWritingStyle',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STDateTime',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STDisplacedByCustomXml',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STHeightRule',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STHint',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STPTabAlignment',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STPTabLeader',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STPTabRelativeTo',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STProofErr',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STShortHexNumber',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STThemeColor',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STUcharHexNumber',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.STZoom',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTAbstractNumImpl$1LvlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1AltChunkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1PList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTBodyImpl$1TblList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1AltChunkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1PList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentImpl$1TblList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTCommentsImpl$1CommentList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTDrawingImpl$1AnchorList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTDrawingImpl$1InlineList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTEndnotesImpl$1EndnoteList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1CalcOnExitList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1DdListList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1EnabledList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1EntryMacroList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1ExitMacroList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1HelpTextList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1NameList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1StatusTextList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFFDataImpl$1TextInputList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFootnotesImpl$1FootnoteList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1AltChunkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1PList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTFtnEdnImpl$1TblList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1AltChunkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1PList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHdrFtrImpl$1TblList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1FldSimpleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1HyperlinkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1RList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1SmartTagList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTHyperlinkImpl$1SubDocList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTLatentStylesImpl$1LsdExceptionList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTNumImpl$1LvlOverrideList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTNumberingImpl$1AbstractNumList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTNumberingImpl$1NumList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTNumberingImpl$1NumPicBulletList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1FldSimpleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1HyperlinkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1RList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1SmartTagList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTPImpl$1SubDocList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1AnnotationRefList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1BrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1CommentReferenceList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1ContinuationSeparatorList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1CrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1DayLongList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1DayShortList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1DelInstrTextList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1DelTextList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1DrawingList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1EndnoteRefList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1EndnoteReferenceList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1FldCharList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1FootnoteRefList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1FootnoteReferenceList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1InstrTextList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1LastRenderedPageBreakList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1MonthLongList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1MonthShortList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1NoBreakHyphenList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1ObjectList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1PgNumList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1PictList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1PtabList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1RubyList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1SeparatorList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1SoftHyphenList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1SymList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1TList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1TabList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1YearLongList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRImpl$1YearShortList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRowImpl$1TcList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1AccList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1BarList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1BorderBoxList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1BoxList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1DList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1EqArrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1FList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1FuncList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1GroupChrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1LimLowList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1LimUppList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1MList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1NaryList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1PhantList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1R2List',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1RList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1RadList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1SPreList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1SSubList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1SSubSupList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1SSupList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTRunTrackChangeImpl$1SmartTagList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1PList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentBlockImpl$1TblList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentCellImpl$1TcList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1FldSimpleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1HyperlinkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1RList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1SmartTagList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtContentRunImpl$1SubDocList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtEndPrImpl$1RPrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1AliasList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1BibliographyList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1CitationList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1ComboBoxList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1DataBindingList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1DateList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1DocPartListList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1DocPartObjList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1DropDownListList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1EquationList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1GroupList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1IdList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1LockList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1PictureList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1PlaceholderList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1RPrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1RichTextList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1ShowingPlcHdrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1TagList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1TemporaryList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSdtPrImpl$1TextList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSectPrImpl$1FooterReferenceList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSectPrImpl$1HeaderReferenceList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSettingsImpl$1ActiveWritingStyleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSettingsImpl$1AttachedSchemaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSettingsImpl$1SmartTagTypeList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1FldSimpleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1HyperlinkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1RList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1SmartTagList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSimpleFieldImpl$1SubDocList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagPrImpl$1AttrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1FldSimpleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1HyperlinkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1RList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1SmartTagList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTSmartTagRunImpl$1SubDocList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTStyleImpl$1TblStylePrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTStylesImpl$1StyleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTabsImpl$1TabList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblGridBaseImpl$1GridColList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTblImpl$1TrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1AltChunkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1PList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTcImpl$1TblList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1CantSplitList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1CnfStyleList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1DivIdList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1GridAfterList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1GridBeforeList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1HiddenList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1JcList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1TblCellSpacingList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1TblHeaderList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1TrHeightList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1WAfterList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTrPrBaseImpl$1WBeforeList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1AltChunkList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1BookmarkEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1BookmarkStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CommentRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CommentRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlDelRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlDelRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlInsRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlInsRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlMoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlMoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlMoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1CustomXmlMoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1DelList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1InsList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1MoveFromList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1MoveFromRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1MoveFromRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1MoveToList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1MoveToRangeEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1MoveToRangeStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1OMathList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1OMathParaList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1PList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1PermEndList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1PermStartList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1ProofErrList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1SdtList',
- 'org.openxmlformats.schemas.wordprocessingml.x2006.main.impl.CTTxbxContentImpl$1TblList',
- 'org.osgi.framework.BundleActivator',
- 'org.osgi.framework.BundleContext',
- 'org.osgi.framework.ServiceReference',
- 'org.osgi.framework.ServiceRegistration',
- 'org.osgi.util.tracker.ServiceTracker',
- 'org.osgi.util.tracker.ServiceTrackerCustomizer',
- 'org.slf4j.Logger',
- 'org.slf4j.LoggerFactory',
- 'org.sqlite.SQLiteConfig',
- 'org.tukaani.xz.ARMOptions',
- 'org.tukaani.xz.ARMThumbOptions',
- 'org.tukaani.xz.DeltaOptions',
- 'org.tukaani.xz.FilterOptions',
- 'org.tukaani.xz.FinishableWrapperOutputStream',
- 'org.tukaani.xz.IA64Options',
- 'org.tukaani.xz.LZMA2InputStream',
- 'org.tukaani.xz.LZMA2Options',
- 'org.tukaani.xz.LZMAInputStream',
- 'org.tukaani.xz.PowerPCOptions',
- 'org.tukaani.xz.SPARCOptions',
- 'org.tukaani.xz.SingleXZInputStream',
- 'org.tukaani.xz.UnsupportedOptionsException',
- 'org.tukaani.xz.X86Options',
- 'org.tukaani.xz.XZ',
- 'org.tukaani.xz.XZInputStream',
- 'org.tukaani.xz.XZOutputStream',
- 'org.w3.x2000.x09.xmldsig.KeyInfoType',
- 'org.w3.x2000.x09.xmldsig.SignatureMethodType',
- 'org.w3.x2000.x09.xmldsig.SignatureValueType',
- 'org.w3.x2000.x09.xmldsig.TransformsType',
- 'org.w3.x2000.x09.xmldsig.impl.SignatureTypeImpl$1ObjectList',
- 'org.w3.x2000.x09.xmldsig.impl.SignedInfoTypeImpl$1ReferenceList',
- 'org.w3.x2000.x09.xmldsig.impl.TransformTypeImpl$1XPathList',
- 'org.w3.x2000.x09.xmldsig.impl.TransformTypeImpl$2XPathList',
- 'ucar.ma2.DataType',
- 'ucar.nc2.Attribute',
- 'ucar.nc2.Dimension',
- 'ucar.nc2.Group',
- 'ucar.nc2.NetcdfFile',
- 'ucar.nc2.Variable',
- 'ucar.nc2.dataset.NetcdfDataset'
-]
diff --git a/plugins/mapper-attachments/licenses/bcmail-jdk15on-1.54.jar.sha1 b/plugins/mapper-attachments/licenses/bcmail-jdk15on-1.54.jar.sha1
deleted file mode 100644
index 79da45c5c4..0000000000
--- a/plugins/mapper-attachments/licenses/bcmail-jdk15on-1.54.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d9b5432b4b29ef4a853223bc6e19379ef116cca \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/bcmail-jdk15on-LICENSE.txt b/plugins/mapper-attachments/licenses/bcmail-jdk15on-LICENSE.txt
deleted file mode 100644
index dbba1dd782..0000000000
--- a/plugins/mapper-attachments/licenses/bcmail-jdk15on-LICENSE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2000 - 2013 The Legion of the Bouncy Castle Inc.
- (http://www.bouncycastle.org)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/plugins/mapper-attachments/licenses/bcpkix-jdk15on-1.54.jar.sha1 b/plugins/mapper-attachments/licenses/bcpkix-jdk15on-1.54.jar.sha1
deleted file mode 100644
index 2d0c3cf4e2..0000000000
--- a/plugins/mapper-attachments/licenses/bcpkix-jdk15on-1.54.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b11bfee99bb11eea344de6e4a07fe89212c55c02 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/bcpkix-jdk15on-LICENSE.txt b/plugins/mapper-attachments/licenses/bcpkix-jdk15on-LICENSE.txt
deleted file mode 100644
index e1fc4a1506..0000000000
--- a/plugins/mapper-attachments/licenses/bcpkix-jdk15on-LICENSE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2000 - 2013 The Legion of the Bouncy Castle Inc.
- (http://www.bouncycastle.org)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/plugins/mapper-attachments/licenses/bcpkix-jdk15on-NOTICE.txt b/plugins/mapper-attachments/licenses/bcpkix-jdk15on-NOTICE.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/plugins/mapper-attachments/licenses/bcpkix-jdk15on-NOTICE.txt
+++ /dev/null
diff --git a/plugins/mapper-attachments/licenses/bcprov-jdk15on-1.54.jar.sha1 b/plugins/mapper-attachments/licenses/bcprov-jdk15on-1.54.jar.sha1
deleted file mode 100644
index fcda646b42..0000000000
--- a/plugins/mapper-attachments/licenses/bcprov-jdk15on-1.54.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1acdedeb89f1d950d67b73d481eb7736df65eedb \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/bcprov-jdk15on-LICENSE.txt b/plugins/mapper-attachments/licenses/bcprov-jdk15on-LICENSE.txt
deleted file mode 100644
index e1fc4a1506..0000000000
--- a/plugins/mapper-attachments/licenses/bcprov-jdk15on-LICENSE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2000 - 2013 The Legion of the Bouncy Castle Inc.
- (http://www.bouncycastle.org)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/plugins/mapper-attachments/licenses/bcprov-jdk15on-NOTICE.txt b/plugins/mapper-attachments/licenses/bcprov-jdk15on-NOTICE.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/plugins/mapper-attachments/licenses/bcprov-jdk15on-NOTICE.txt
+++ /dev/null
diff --git a/plugins/mapper-attachments/licenses/commons-codec-1.10.jar.sha1 b/plugins/mapper-attachments/licenses/commons-codec-1.10.jar.sha1
deleted file mode 100644
index 3fe8682a1b..0000000000
--- a/plugins/mapper-attachments/licenses/commons-codec-1.10.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/commons-codec-NOTICE.txt b/plugins/mapper-attachments/licenses/commons-codec-NOTICE.txt
deleted file mode 100644
index 72eb32a902..0000000000
--- a/plugins/mapper-attachments/licenses/commons-codec-NOTICE.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Apache Commons CLI
-Copyright 2001-2009 The Apache Software Foundation
-
-This product includes software developed by
-The Apache Software Foundation (http://www.apache.org/).
diff --git a/plugins/mapper-attachments/licenses/commons-compress-1.10.jar.sha1 b/plugins/mapper-attachments/licenses/commons-compress-1.10.jar.sha1
deleted file mode 100644
index 65c74b9a88..0000000000
--- a/plugins/mapper-attachments/licenses/commons-compress-1.10.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5eeb27c57eece1faf2d837868aeccc94d84dcc9a \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/commons-compress-LICENSE.txt b/plugins/mapper-attachments/licenses/commons-compress-LICENSE.txt
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/plugins/mapper-attachments/licenses/commons-compress-LICENSE.txt
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/plugins/mapper-attachments/licenses/commons-compress-NOTICE.txt b/plugins/mapper-attachments/licenses/commons-compress-NOTICE.txt
deleted file mode 100644
index edd2f2c78e..0000000000
--- a/plugins/mapper-attachments/licenses/commons-compress-NOTICE.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Apache Commons Compress
-Copyright 2002-2015 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-The files in the package org.apache.commons.compress.archivers.sevenz
-were derived from the LZMA SDK, version 9.20 (C/ and CPP/7zip/),
-which has been placed in the public domain:
-
-"LZMA SDK is placed in the public domain." (http://www.7-zip.org/sdk.html)
diff --git a/plugins/mapper-attachments/licenses/commons-io-2.4.jar.sha1 b/plugins/mapper-attachments/licenses/commons-io-2.4.jar.sha1
deleted file mode 100644
index 688318c938..0000000000
--- a/plugins/mapper-attachments/licenses/commons-io-2.4.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b1b6ea3b7e4aa4f492509a4952029cd8e48019ad
diff --git a/plugins/mapper-attachments/licenses/commons-io-LICENSE.txt b/plugins/mapper-attachments/licenses/commons-io-LICENSE.txt
deleted file mode 100644
index d645695673..0000000000
--- a/plugins/mapper-attachments/licenses/commons-io-LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/plugins/mapper-attachments/licenses/commons-io-NOTICE.txt b/plugins/mapper-attachments/licenses/commons-io-NOTICE.txt
deleted file mode 100644
index a6b77d1eb6..0000000000
--- a/plugins/mapper-attachments/licenses/commons-io-NOTICE.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Apache Commons IO
-Copyright 2002-2014 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
diff --git a/plugins/mapper-attachments/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/mapper-attachments/licenses/commons-logging-1.1.3.jar.sha1
deleted file mode 100644
index 5b8f029e58..0000000000
--- a/plugins/mapper-attachments/licenses/commons-logging-1.1.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/commons-logging-LICENSE.txt b/plugins/mapper-attachments/licenses/commons-logging-LICENSE.txt
deleted file mode 100644
index d645695673..0000000000
--- a/plugins/mapper-attachments/licenses/commons-logging-LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/plugins/mapper-attachments/licenses/commons-logging-NOTICE.txt b/plugins/mapper-attachments/licenses/commons-logging-NOTICE.txt
deleted file mode 100644
index d3d6e140ce..0000000000
--- a/plugins/mapper-attachments/licenses/commons-logging-NOTICE.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Apache Commons Logging
-Copyright 2003-2014 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
diff --git a/plugins/mapper-attachments/licenses/fontbox-2.0.1.jar.sha1 b/plugins/mapper-attachments/licenses/fontbox-2.0.1.jar.sha1
deleted file mode 100644
index 0668199b24..0000000000
--- a/plugins/mapper-attachments/licenses/fontbox-2.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b9d4f0993e015f3f1ce0be9e7300cf62dd7a7f15 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/fontbox-LICENSE.txt b/plugins/mapper-attachments/licenses/fontbox-LICENSE.txt
deleted file mode 100644
index 97553f24a4..0000000000
--- a/plugins/mapper-attachments/licenses/fontbox-LICENSE.txt
+++ /dev/null
@@ -1,344 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-EXTERNAL COMPONENTS
-
-Apache PDFBox includes a number of components with separate copyright notices
-and license terms. Your use of these components is subject to the terms and
-conditions of the following licenses.
-
-Contributions made to the original PDFBox and FontBox projects:
-
- Copyright (c) 2002-2007, www.pdfbox.org
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of pdfbox; nor the names of its contributors may be
- used to endorse or promote products derived from this software without
- specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- SUCH DAMAGE.
-
-Adobe Font Metrics (AFM) for PDF Core 14 Fonts
-
- This file and the 14 PostScript(R) AFM files it accompanies may be used,
- copied, and distributed for any purpose and without charge, with or without
- modification, provided that all copyright notices are retained; that the
- AFM files are not distributed without this file; that all modifications
- to this file or any of the AFM files are prominently noted in the modified
- file(s); and that this paragraph is not modified. Adobe Systems has no
- responsibility or obligation to support the use of the AFM files.
-
-CMaps for PDF Fonts (http://opensource.adobe.com/wiki/display/cmap/Downloads)
-
- Copyright 1990-2009 Adobe Systems Incorporated.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- Neither the name of Adobe Systems Incorporated nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- THE POSSIBILITY OF SUCH DAMAGE.
-
-PaDaF PDF/A preflight (http://sourceforge.net/projects/padaf)
-
- Copyright 2010 Atos Worldline SAS
-
- Licensed by Atos Worldline SAS under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- Atos Worldline SAS licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-OSXAdapter
-
- Version: 2.0
-
- Disclaimer: IMPORTANT: This Apple software is supplied to you by
- Apple Inc. ("Apple") in consideration of your agreement to the
- following terms, and your use, installation, modification or
- redistribution of this Apple software constitutes acceptance of these
- terms. If you do not agree with these terms, please do not use,
- install, modify or redistribute this Apple software.
-
- In consideration of your agreement to abide by the following terms, and
- subject to these terms, Apple grants you a personal, non-exclusive
- license, under Apple's copyrights in this original Apple software (the
- "Apple Software"), to use, reproduce, modify and redistribute the Apple
- Software, with or without modifications, in source and/or binary forms;
- provided that if you redistribute the Apple Software in its entirety and
- without modifications, you must retain this notice and the following
- text and disclaimers in all such redistributions of the Apple Software.
- Neither the name, trademarks, service marks or logos of Apple Inc.
- may be used to endorse or promote products derived from the Apple
- Software without specific prior written permission from Apple. Except
- as expressly stated in this notice, no other rights or licenses, express
- or implied, are granted by Apple herein, including but not limited to
- any patent rights that may be infringed by your derivative works or by
- other works in which the Apple Software may be incorporated.
-
- The Apple Software is provided by Apple on an "AS IS" basis. APPLE
- MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
- THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
- FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
- OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
-
- IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
- OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
- MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
- AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
- STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
- Copyright (C) 2003-2007 Apple, Inc., All Rights Reserved
diff --git a/plugins/mapper-attachments/licenses/fontbox-NOTICE.txt b/plugins/mapper-attachments/licenses/fontbox-NOTICE.txt
deleted file mode 100644
index 3c85708256..0000000000
--- a/plugins/mapper-attachments/licenses/fontbox-NOTICE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Apache PDFBox
-Copyright 2014 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Based on source code originally developed in the PDFBox and
-FontBox projects.
-
-Copyright (c) 2002-2007, www.pdfbox.org
-
-Based on source code originally developed in the PaDaF project.
-Copyright (c) 2010 Atos Worldline SAS
-
-Includes the Adobe Glyph List
-Copyright 1997, 1998, 2002, 2007, 2010 Adobe Systems Incorporated.
-
-Includes the Zapf Dingbats Glyph List
-Copyright 2002, 2010 Adobe Systems Incorporated.
-
-Includes OSXAdapter
-Copyright (C) 2003-2007 Apple, Inc., All Rights Reserved
diff --git a/plugins/mapper-attachments/licenses/jempbox-1.8.12.jar.sha1 b/plugins/mapper-attachments/licenses/jempbox-1.8.12.jar.sha1
deleted file mode 100644
index 0e3dcf4573..0000000000
--- a/plugins/mapper-attachments/licenses/jempbox-1.8.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-426450c573c19f6f2c751a7a52c11931b712c9f6 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/jempbox-LICENSE.txt b/plugins/mapper-attachments/licenses/jempbox-LICENSE.txt
deleted file mode 100644
index 1cf412f9c5..0000000000
--- a/plugins/mapper-attachments/licenses/jempbox-LICENSE.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2006-2007, www.jempbox.org
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-3. Neither the name of fontbox; nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/plugins/mapper-attachments/licenses/jempbox-NOTICE.txt b/plugins/mapper-attachments/licenses/jempbox-NOTICE.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/plugins/mapper-attachments/licenses/jempbox-NOTICE.txt
+++ /dev/null
diff --git a/plugins/mapper-attachments/licenses/juniversalchardet-1.0.3.jar.sha1 b/plugins/mapper-attachments/licenses/juniversalchardet-1.0.3.jar.sha1
deleted file mode 100644
index 6b06952678..0000000000
--- a/plugins/mapper-attachments/licenses/juniversalchardet-1.0.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cd49678784c46aa8789c060538e0154013bb421b
diff --git a/plugins/mapper-attachments/licenses/juniversalchardet-LICENSE.txt b/plugins/mapper-attachments/licenses/juniversalchardet-LICENSE.txt
deleted file mode 100644
index 06f965147a..0000000000
--- a/plugins/mapper-attachments/licenses/juniversalchardet-LICENSE.txt
+++ /dev/null
@@ -1,469 +0,0 @@
- MOZILLA PUBLIC LICENSE
- Version 1.1
-
- ---------------
-
-1. Definitions.
-
- 1.0.1. "Commercial Use" means distribution or otherwise making the
- Covered Code available to a third party.
-
- 1.1. "Contributor" means each entity that creates or contributes to
- the creation of Modifications.
-
- 1.2. "Contributor Version" means the combination of the Original
- Code, prior Modifications used by a Contributor, and the Modifications
- made by that particular Contributor.
-
- 1.3. "Covered Code" means the Original Code or Modifications or the
- combination of the Original Code and Modifications, in each case
- including portions thereof.
-
- 1.4. "Electronic Distribution Mechanism" means a mechanism generally
- accepted in the software development community for the electronic
- transfer of data.
-
- 1.5. "Executable" means Covered Code in any form other than Source
- Code.
-
- 1.6. "Initial Developer" means the individual or entity identified
- as the Initial Developer in the Source Code notice required by Exhibit
- A.
-
- 1.7. "Larger Work" means a work which combines Covered Code or
- portions thereof with code not governed by the terms of this License.
-
- 1.8. "License" means this document.
-
- 1.8.1. "Licensable" means having the right to grant, to the maximum
- extent possible, whether at the time of the initial grant or
- subsequently acquired, any and all of the rights conveyed herein.
-
- 1.9. "Modifications" means any addition to or deletion from the
- substance or structure of either the Original Code or any previous
- Modifications. When Covered Code is released as a series of files, a
- Modification is:
- A. Any addition to or deletion from the contents of a file
- containing Original Code or previous Modifications.
-
- B. Any new file that contains any part of the Original Code or
- previous Modifications.
-
- 1.10. "Original Code" means Source Code of computer software code
- which is described in the Source Code notice required by Exhibit A as
- Original Code, and which, at the time of its release under this
- License is not already Covered Code governed by this License.
-
- 1.10.1. "Patent Claims" means any patent claim(s), now owned or
- hereafter acquired, including without limitation, method, process,
- and apparatus claims, in any patent Licensable by grantor.
-
- 1.11. "Source Code" means the preferred form of the Covered Code for
- making modifications to it, including all modules it contains, plus
- any associated interface definition files, scripts used to control
- compilation and installation of an Executable, or source code
- differential comparisons against either the Original Code or another
- well known, available Covered Code of the Contributor's choice. The
- Source Code can be in a compressed or archival form, provided the
- appropriate decompression or de-archiving software is widely available
- for no charge.
-
- 1.12. "You" (or "Your") means an individual or a legal entity
- exercising rights under, and complying with all of the terms of, this
- License or a future version of this License issued under Section 6.1.
- For legal entities, "You" includes any entity which controls, is
- controlled by, or is under common control with You. For purposes of
- this definition, "control" means (a) the power, direct or indirect,
- to cause the direction or management of such entity, whether by
- contract or otherwise, or (b) ownership of more than fifty percent
- (50%) of the outstanding shares or beneficial ownership of such
- entity.
-
-2. Source Code License.
-
- 2.1. The Initial Developer Grant.
- The Initial Developer hereby grants You a world-wide, royalty-free,
- non-exclusive license, subject to third party intellectual property
- claims:
- (a) under intellectual property rights (other than patent or
- trademark) Licensable by Initial Developer to use, reproduce,
- modify, display, perform, sublicense and distribute the Original
- Code (or portions thereof) with or without Modifications, and/or
- as part of a Larger Work; and
-
- (b) under Patents Claims infringed by the making, using or
- selling of Original Code, to make, have made, use, practice,
- sell, and offer for sale, and/or otherwise dispose of the
- Original Code (or portions thereof).
-
- (c) the licenses granted in this Section 2.1(a) and (b) are
- effective on the date Initial Developer first distributes
- Original Code under the terms of this License.
-
- (d) Notwithstanding Section 2.1(b) above, no patent license is
- granted: 1) for code that You delete from the Original Code; 2)
- separate from the Original Code; or 3) for infringements caused
- by: i) the modification of the Original Code or ii) the
- combination of the Original Code with other software or devices.
-
- 2.2. Contributor Grant.
- Subject to third party intellectual property claims, each Contributor
- hereby grants You a world-wide, royalty-free, non-exclusive license
-
- (a) under intellectual property rights (other than patent or
- trademark) Licensable by Contributor, to use, reproduce, modify,
- display, perform, sublicense and distribute the Modifications
- created by such Contributor (or portions thereof) either on an
- unmodified basis, with other Modifications, as Covered Code
- and/or as part of a Larger Work; and
-
- (b) under Patent Claims infringed by the making, using, or
- selling of Modifications made by that Contributor either alone
- and/or in combination with its Contributor Version (or portions
- of such combination), to make, use, sell, offer for sale, have
- made, and/or otherwise dispose of: 1) Modifications made by that
- Contributor (or portions thereof); and 2) the combination of
- Modifications made by that Contributor with its Contributor
- Version (or portions of such combination).
-
- (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
- effective on the date Contributor first makes Commercial Use of
- the Covered Code.
-
- (d) Notwithstanding Section 2.2(b) above, no patent license is
- granted: 1) for any code that Contributor has deleted from the
- Contributor Version; 2) separate from the Contributor Version;
- 3) for infringements caused by: i) third party modifications of
- Contributor Version or ii) the combination of Modifications made
- by that Contributor with other software (except as part of the
- Contributor Version) or other devices; or 4) under Patent Claims
- infringed by Covered Code in the absence of Modifications made by
- that Contributor.
-
-3. Distribution Obligations.
-
- 3.1. Application of License.
- The Modifications which You create or to which You contribute are
- governed by the terms of this License, including without limitation
- Section 2.2. The Source Code version of Covered Code may be
- distributed only under the terms of this License or a future version
- of this License released under Section 6.1, and You must include a
- copy of this License with every copy of the Source Code You
- distribute. You may not offer or impose any terms on any Source Code
- version that alters or restricts the applicable version of this
- License or the recipients' rights hereunder. However, You may include
- an additional document offering the additional rights described in
- Section 3.5.
-
- 3.2. Availability of Source Code.
- Any Modification which You create or to which You contribute must be
- made available in Source Code form under the terms of this License
- either on the same media as an Executable version or via an accepted
- Electronic Distribution Mechanism to anyone to whom You made an
- Executable version available; and if made available via Electronic
- Distribution Mechanism, must remain available for at least twelve (12)
- months after the date it initially became available, or at least six
- (6) months after a subsequent version of that particular Modification
- has been made available to such recipients. You are responsible for
- ensuring that the Source Code version remains available even if the
- Electronic Distribution Mechanism is maintained by a third party.
-
- 3.3. Description of Modifications.
- You must cause all Covered Code to which You contribute to contain a
- file documenting the changes You made to create that Covered Code and
- the date of any change. You must include a prominent statement that
- the Modification is derived, directly or indirectly, from Original
- Code provided by the Initial Developer and including the name of the
- Initial Developer in (a) the Source Code, and (b) in any notice in an
- Executable version or related documentation in which You describe the
- origin or ownership of the Covered Code.
-
- 3.4. Intellectual Property Matters.
- (a) Third Party Claims.
- If Contributor has knowledge that a license under a third party's
- intellectual property rights is required to exercise the rights
- granted by such Contributor under Sections 2.1 or 2.2,
- Contributor must include a text file with the Source Code
- distribution titled "LEGAL" which describes the claim and the
- party making the claim in sufficient detail that a recipient will
- know whom to contact. If Contributor obtains such knowledge after
- the Modification is made available as described in Section 3.2,
- Contributor shall promptly modify the LEGAL file in all copies
- Contributor makes available thereafter and shall take other steps
- (such as notifying appropriate mailing lists or newsgroups)
- reasonably calculated to inform those who received the Covered
- Code that new knowledge has been obtained.
-
- (b) Contributor APIs.
- If Contributor's Modifications include an application programming
- interface and Contributor has knowledge of patent licenses which
- are reasonably necessary to implement that API, Contributor must
- also include this information in the LEGAL file.
-
- (c) Representations.
- Contributor represents that, except as disclosed pursuant to
- Section 3.4(a) above, Contributor believes that Contributor's
- Modifications are Contributor's original creation(s) and/or
- Contributor has sufficient rights to grant the rights conveyed by
- this License.
-
- 3.5. Required Notices.
- You must duplicate the notice in Exhibit A in each file of the Source
- Code. If it is not possible to put such notice in a particular Source
- Code file due to its structure, then You must include such notice in a
- location (such as a relevant directory) where a user would be likely
- to look for such a notice. If You created one or more Modification(s)
- You may add your name as a Contributor to the notice described in
- Exhibit A. You must also duplicate this License in any documentation
- for the Source Code where You describe recipients' rights or ownership
- rights relating to Covered Code. You may choose to offer, and to
- charge a fee for, warranty, support, indemnity or liability
- obligations to one or more recipients of Covered Code. However, You
- may do so only on Your own behalf, and not on behalf of the Initial
- Developer or any Contributor. You must make it absolutely clear that
- any such warranty, support, indemnity or liability obligation is
- offered by You alone, and You hereby agree to indemnify the Initial
- Developer and every Contributor for any liability incurred by the
- Initial Developer or such Contributor as a result of warranty,
- support, indemnity or liability terms You offer.
-
- 3.6. Distribution of Executable Versions.
- You may distribute Covered Code in Executable form only if the
- requirements of Sections 3.1-3.5 have been met for that Covered Code,
- and if You include a notice stating that the Source Code version of
- the Covered Code is available under the terms of this License,
- including a description of how and where You have fulfilled the
- obligations of Section 3.2. The notice must be conspicuously included
- in any notice in an Executable version, related documentation or
- collateral in which You describe recipients' rights relating to the
- Covered Code. You may distribute the Executable version of Covered
- Code or ownership rights under a license of Your choice, which may
- contain terms different from this License, provided that You are in
- compliance with the terms of this License and that the license for the
- Executable version does not attempt to limit or alter the recipient's
- rights in the Source Code version from the rights set forth in this
- License. If You distribute the Executable version under a different
- license You must make it absolutely clear that any terms which differ
- from this License are offered by You alone, not by the Initial
- Developer or any Contributor. You hereby agree to indemnify the
- Initial Developer and every Contributor for any liability incurred by
- the Initial Developer or such Contributor as a result of any such
- terms You offer.
-
- 3.7. Larger Works.
- You may create a Larger Work by combining Covered Code with other code
- not governed by the terms of this License and distribute the Larger
- Work as a single product. In such a case, You must make sure the
- requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
- If it is impossible for You to comply with any of the terms of this
- License with respect to some or all of the Covered Code due to
- statute, judicial order, or regulation then You must: (a) comply with
- the terms of this License to the maximum extent possible; and (b)
- describe the limitations and the code they affect. Such description
- must be included in the LEGAL file described in Section 3.4 and must
- be included with all distributions of the Source Code. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Application of this License.
-
- This License applies to code to which the Initial Developer has
- attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
- 6.1. New Versions.
- Netscape Communications Corporation ("Netscape") may publish revised
- and/or new versions of the License from time to time. Each version
- will be given a distinguishing version number.
-
- 6.2. Effect of New Versions.
- Once Covered Code has been published under a particular version of the
- License, You may always continue to use it under the terms of that
- version. You may also choose to use such Covered Code under the terms
- of any subsequent version of the License published by Netscape. No one
- other than Netscape has the right to modify the terms applicable to
- Covered Code created under this License.
-
- 6.3. Derivative Works.
- If You create or use a modified version of this License (which you may
- only do in order to apply it to code which is not already Covered Code
- governed by this License), You must (a) rename Your license so that
- the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
- "MPL", "NPL" or any confusingly similar phrase do not appear in your
- license (except to note that your license differs from this License)
- and (b) otherwise make it clear that Your version of the license
- contains terms which differ from the Mozilla Public License and
- Netscape Public License. (Filling in the name of the Initial
- Developer, Original Code or Contributor in the notice described in
- Exhibit A shall not of itself be deemed to be a modification of
- this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
- COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
- WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
- WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
- DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
- THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
- IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
- YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
- COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
- OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
- ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
- 8.1. This License and the rights granted hereunder will terminate
- automatically if You fail to comply with terms herein and fail to cure
- such breach within 30 days of becoming aware of the breach. All
- sublicenses to the Covered Code which are properly granted shall
- survive any termination of this License. Provisions which, by their
- nature, must remain in effect beyond the termination of this License
- shall survive.
-
- 8.2. If You initiate litigation by asserting a patent infringement
- claim (excluding declaratory judgment actions) against Initial Developer
- or a Contributor (the Initial Developer or Contributor against whom
- You file such action is referred to as "Participant") alleging that:
-
- (a) such Participant's Contributor Version directly or indirectly
- infringes any patent, then any and all rights granted by such
- Participant to You under Sections 2.1 and/or 2.2 of this License
- shall, upon 60 days notice from Participant, terminate prospectively,
- unless within 60 days after receipt of notice You either: (i)
- agree in writing to pay Participant a mutually agreeable reasonable
- royalty for Your past and future use of Modifications made by such
- Participant, or (ii) withdraw Your litigation claim with respect to
- the Contributor Version against such Participant. If within 60 days
- of notice, a reasonable royalty and payment arrangement are not
- mutually agreed upon in writing by the parties or the litigation claim
- is not withdrawn, the rights granted by Participant to You under
- Sections 2.1 and/or 2.2 automatically terminate at the expiration of
- the 60 day notice period specified above.
-
- (b) any software, hardware, or device, other than such Participant's
- Contributor Version, directly or indirectly infringes any patent, then
- any rights granted to You by such Participant under Sections 2.1(b)
- and 2.2(b) are revoked effective as of the date You first made, used,
- sold, distributed, or had made, Modifications made by that
- Participant.
-
- 8.3. If You assert a patent infringement claim against Participant
- alleging that such Participant's Contributor Version directly or
- indirectly infringes any patent where such claim is resolved (such as
- by license or settlement) prior to the initiation of patent
- infringement litigation, then the reasonable value of the licenses
- granted by such Participant under Sections 2.1 or 2.2 shall be taken
- into account in determining the amount or value of any payment or
- license.
-
- 8.4. In the event of termination under Sections 8.1 or 8.2 above,
- all end user license agreements (excluding distributors and resellers)
- which have been validly granted by You or any distributor hereunder
- prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
- UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
- (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
- DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
- OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
- ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
- CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
- WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
- COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
- INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
- LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
- RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
- PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
- EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
- THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
- The Covered Code is a "commercial item," as that term is defined in
- 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
- software" and "commercial computer software documentation," as such
- terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
- C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
- all U.S. Government End Users acquire Covered Code with only those
- rights set forth herein.
-
-11. MISCELLANEOUS.
-
- This License represents the complete agreement concerning subject
- matter hereof. If any provision of this License is held to be
- unenforceable, such provision shall be reformed only to the extent
- necessary to make it enforceable. This License shall be governed by
- California law provisions (except to the extent applicable law, if
- any, provides otherwise), excluding its conflict-of-law provisions.
- With respect to disputes in which at least one party is a citizen of,
- or an entity chartered or registered to do business in the United
- States of America, any litigation relating to this License shall be
- subject to the jurisdiction of the Federal Courts of the Northern
- District of California, with venue lying in Santa Clara County,
- California, with the losing party responsible for costs, including
- without limitation, court costs and reasonable attorneys' fees and
- expenses. The application of the United Nations Convention on
- Contracts for the International Sale of Goods is expressly excluded.
- Any law or regulation which provides that the language of a contract
- shall be construed against the drafter shall not apply to this
- License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
- As between Initial Developer and the Contributors, each party is
- responsible for claims and damages arising, directly or indirectly,
- out of its utilization of rights under this License and You agree to
- work with Initial Developer and Contributors to distribute such
- responsibility on an equitable basis. Nothing herein is intended or
- shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
- Initial Developer may designate portions of the Covered Code as
- "Multiple-Licensed". "Multiple-Licensed" means that the Initial
- Developer permits You to utilize portions of the Covered Code under
- Your choice of the NPL or the alternative licenses, if any, specified
- by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A - Mozilla Public License.
-
- ``The contents of this file are subject to the Mozilla Public License
- Version 1.1 (the "License"); you may not use this file except in
- compliance with the License. You may obtain a copy of the License at
- http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS IS"
- basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
- License for the specific language governing rights and limitations
- under the License.
-
- The Original Code is ______________________________________.
-
- The Initial Developer of the Original Code is ________________________.
- Portions created by ______________________ are Copyright (C) ______
- _______________________. All Rights Reserved.
-
- Contributor(s): ______________________________________.
-
- Alternatively, the contents of this file may be used under the terms
- of the _____ license (the "[___] License"), in which case the
- provisions of [______] License are applicable instead of those
- above. If you wish to allow use of your version of this file only
- under the terms of the [____] License and not to allow others to use
- your version of this file under the MPL, indicate your decision by
- deleting the provisions above and replacing them with the notice and
- other provisions required by the [___] License. If you do not delete
- the provisions above, a recipient may use your version of this file
- under either the MPL or the [___] License."
-
- [NOTE: The text of this Exhibit A may differ slightly from the text of
- the notices in the Source Code files of the Original Code. You should
- use the text of this Exhibit A rather than the text found in the
- Original Code Source Code for Your Modifications.]
diff --git a/plugins/mapper-attachments/licenses/juniversalchardet-NOTICE.txt b/plugins/mapper-attachments/licenses/juniversalchardet-NOTICE.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/plugins/mapper-attachments/licenses/juniversalchardet-NOTICE.txt
+++ /dev/null
diff --git a/plugins/mapper-attachments/licenses/pdfbox-2.0.1.jar.sha1 b/plugins/mapper-attachments/licenses/pdfbox-2.0.1.jar.sha1
deleted file mode 100644
index 1014db3404..0000000000
--- a/plugins/mapper-attachments/licenses/pdfbox-2.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dbc69649118b7eff278f228c070a40ee559e1f62 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/pdfbox-LICENSE.txt b/plugins/mapper-attachments/licenses/pdfbox-LICENSE.txt
deleted file mode 100644
index 97553f24a4..0000000000
--- a/plugins/mapper-attachments/licenses/pdfbox-LICENSE.txt
+++ /dev/null
@@ -1,344 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-EXTERNAL COMPONENTS
-
-Apache PDFBox includes a number of components with separate copyright notices
-and license terms. Your use of these components is subject to the terms and
-conditions of the following licenses.
-
-Contributions made to the original PDFBox and FontBox projects:
-
- Copyright (c) 2002-2007, www.pdfbox.org
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of pdfbox; nor the names of its contributors may be
- used to endorse or promote products derived from this software without
- specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- SUCH DAMAGE.
-
-Adobe Font Metrics (AFM) for PDF Core 14 Fonts
-
- This file and the 14 PostScript(R) AFM files it accompanies may be used,
- copied, and distributed for any purpose and without charge, with or without
- modification, provided that all copyright notices are retained; that the
- AFM files are not distributed without this file; that all modifications
- to this file or any of the AFM files are prominently noted in the modified
- file(s); and that this paragraph is not modified. Adobe Systems has no
- responsibility or obligation to support the use of the AFM files.
-
-CMaps for PDF Fonts (http://opensource.adobe.com/wiki/display/cmap/Downloads)
-
- Copyright 1990-2009 Adobe Systems Incorporated.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- Neither the name of Adobe Systems Incorporated nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- THE POSSIBILITY OF SUCH DAMAGE.
-
-PaDaF PDF/A preflight (http://sourceforge.net/projects/padaf)
-
- Copyright 2010 Atos Worldline SAS
-
- Licensed by Atos Worldline SAS under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- Atos Worldline SAS licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-OSXAdapter
-
- Version: 2.0
-
- Disclaimer: IMPORTANT: This Apple software is supplied to you by
- Apple Inc. ("Apple") in consideration of your agreement to the
- following terms, and your use, installation, modification or
- redistribution of this Apple software constitutes acceptance of these
- terms. If you do not agree with these terms, please do not use,
- install, modify or redistribute this Apple software.
-
- In consideration of your agreement to abide by the following terms, and
- subject to these terms, Apple grants you a personal, non-exclusive
- license, under Apple's copyrights in this original Apple software (the
- "Apple Software"), to use, reproduce, modify and redistribute the Apple
- Software, with or without modifications, in source and/or binary forms;
- provided that if you redistribute the Apple Software in its entirety and
- without modifications, you must retain this notice and the following
- text and disclaimers in all such redistributions of the Apple Software.
- Neither the name, trademarks, service marks or logos of Apple Inc.
- may be used to endorse or promote products derived from the Apple
- Software without specific prior written permission from Apple. Except
- as expressly stated in this notice, no other rights or licenses, express
- or implied, are granted by Apple herein, including but not limited to
- any patent rights that may be infringed by your derivative works or by
- other works in which the Apple Software may be incorporated.
-
- The Apple Software is provided by Apple on an "AS IS" basis. APPLE
- MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
- THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
- FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
- OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
-
- IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
- OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
- MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
- AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
- STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
- Copyright (C) 2003-2007 Apple, Inc., All Rights Reserved
diff --git a/plugins/mapper-attachments/licenses/pdfbox-NOTICE.txt b/plugins/mapper-attachments/licenses/pdfbox-NOTICE.txt
deleted file mode 100644
index 3c85708256..0000000000
--- a/plugins/mapper-attachments/licenses/pdfbox-NOTICE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Apache PDFBox
-Copyright 2014 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Based on source code originally developed in the PDFBox and
-FontBox projects.
-
-Copyright (c) 2002-2007, www.pdfbox.org
-
-Based on source code originally developed in the PaDaF project.
-Copyright (c) 2010 Atos Worldline SAS
-
-Includes the Adobe Glyph List
-Copyright 1997, 1998, 2002, 2007, 2010 Adobe Systems Incorporated.
-
-Includes the Zapf Dingbats Glyph List
-Copyright 2002, 2010 Adobe Systems Incorporated.
-
-Includes OSXAdapter
-Copyright (C) 2003-2007 Apple, Inc., All Rights Reserved
diff --git a/plugins/mapper-attachments/licenses/poi-3.15-beta1.jar.sha1 b/plugins/mapper-attachments/licenses/poi-3.15-beta1.jar.sha1
deleted file mode 100644
index 6049604dd9..0000000000
--- a/plugins/mapper-attachments/licenses/poi-3.15-beta1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-048bb8326b81323631d9ceb4236cfbd382e56da2 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/poi-LICENSE.txt b/plugins/mapper-attachments/licenses/poi-LICENSE.txt
deleted file mode 100644
index dd2cbd5fbc..0000000000
--- a/plugins/mapper-attachments/licenses/poi-LICENSE.txt
+++ /dev/null
@@ -1,463 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-APACHE POI SUBCOMPONENTS:
-
-Apache POI includes subcomponents with separate copyright notices and
-license terms. Your use of these subcomponents is subject to the terms
-and conditions of the following licenses:
-
-
-Office Open XML schemas (ooxml-schemas-1.1.jar)
-
- The Office Open XML schema definitions used by Apache POI are
- a part of the Office Open XML ECMA Specification (ECMA-376, [1]).
- As defined in section 9.4 of the ECMA bylaws [2], this specification
- is available to all interested parties without restriction:
-
- 9.4 All documents when approved shall be made available to
- all interested parties without restriction.
-
- Furthermore, both Microsoft and Adobe have granted patent licenses
- to this work [3,4,5].
-
- [1] http://www.ecma-international.org/publications/standards/Ecma-376.htm
- [2] http://www.ecma-international.org/memento/Ecmabylaws.htm
- [3] http://www.microsoft.com/openspecifications/en/us/programs/osp/default.aspx
- [4] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Edition%202%20Microsoft%20Patent%20Declaration.pdf
- [5] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Adobe%20Patent%20Declaration.pdf
-
-
-JUnit test library (junit-4.11.jar)
-
- Common Public License - v 1.0
-
- THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON
- PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
- OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
- 1. DEFINITIONS
-
- "Contribution" means:
-
- a) in the case of the initial Contributor, the initial code and
- documentation distributed under this Agreement, and
-
- b) in the case of each subsequent Contributor:
-
- i) changes to the Program, and
-
- ii) additions to the Program;
-
- where such changes and/or additions to the Program originate from
- and are distributed by that particular Contributor. A Contribution
- 'originates' from a Contributor if it was added to the Program by
- such Contributor itself or anyone acting on such Contributor's behalf.
- Contributions do not include additions to the Program which: (i) are
- separate modules of software distributed in conjunction with the
- Program under their own license agreement, and (ii) are not derivative
- works of the Program.
-
- "Contributor" means any person or entity that distributes the Program.
-
- "Licensed Patents " mean patent claims licensable by a Contributor which
- are necessarily infringed by the use or sale of its Contribution alone
- or when combined with the Program.
-
- "Program" means the Contributions distributed in accordance with this
- Agreement.
-
- "Recipient" means anyone who receives the Program under this Agreement,
- including all Contributors.
-
- 2. GRANT OF RIGHTS
-
- a) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free copyright license
- to reproduce, prepare derivative works of, publicly display, publicly
- perform, distribute and sublicense the Contribution of such
- Contributor, if any, and such derivative works, in source code and
- object code form.
-
- b) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free patent license under
- Licensed Patents to make, use, sell, offer to sell, import and
- otherwise transfer the Contribution of such Contributor, if any, in
- source code and object code form. This patent license shall apply to
- the combination of the Contribution and the Program if, at the time
- the Contribution is added by the Contributor, such addition of the
- Contribution causes such combination to be covered by the Licensed
- Patents. The patent license shall not apply to any other combinations
- which include the Contribution. No hardware per se is licensed
- hereunder.
-
- c) Recipient understands that although each Contributor grants the
- licenses to its Contributions set forth herein, no assurances are
- provided by any Contributor that the Program does not infringe the
- patent or other intellectual property rights of any other entity.
- Each Contributor disclaims any liability to Recipient for claims
- brought by any other entity based on infringement of intellectual
- property rights or otherwise. As a condition to exercising the rights
- and licenses granted hereunder, each Recipient hereby assumes sole
- responsibility to secure any other intellectual property rights
- needed, if any. For example, if a third party patent license is
- required to allow Recipient to distribute the Program, it is
- Recipient's responsibility to acquire that license before
- distributing the Program.
-
- d) Each Contributor represents that to its knowledge it has sufficient
- copyright rights in its Contribution, if any, to grant the copyright
- license set forth in this Agreement.
-
- 3. REQUIREMENTS
-
- A Contributor may choose to distribute the Program in object code form
- under its own license agreement, provided that:
-
- a) it complies with the terms and conditions of this Agreement; and
-
- b) its license agreement:
-
- i) effectively disclaims on behalf of all Contributors all warranties
- and conditions, express and implied, including warranties or
- conditions of title and non-infringement, and implied warranties
- or conditions of merchantability and fitness for a particular
- purpose;
-
- ii) effectively excludes on behalf of all Contributors all liability
- for damages, including direct, indirect, special, incidental and
- consequential damages, such as lost profits;
-
- iii) states that any provisions which differ from this Agreement are
- offered by that Contributor alone and not by any other party; and
-
- iv) states that source code for the Program is available from such
- Contributor, and informs licensees how to obtain it in a
- reasonable manner on or through a medium customarily used for
- software exchange.
-
- When the Program is made available in source code form:
-
- a) it must be made available under this Agreement; and
-
- b) a copy of this Agreement must be included with each copy of
- the Program.
-
- Contributors may not remove or alter any copyright notices contained
- within the Program.
-
- Each Contributor must identify itself as the originator of its
- Contribution, if any, in a manner that reasonably allows subsequent
- Recipients to identify the originator of the Contribution.
-
- 4. COMMERCIAL DISTRIBUTION
-
- Commercial distributors of software may accept certain responsibilities
- with respect to end users, business partners and the like. While this
- license is intended to facilitate the commercial use of the Program,
- the Contributor who includes the Program in a commercial product offering
- should do so in a manner which does not create potential liability for
- other Contributors. Therefore, if a Contributor includes the Program
- in a commercial product offering, such Contributor ("Commercial
- Contributor") hereby agrees to defend and indemnify every other
- Contributor ("Indemnified Contributor") against any losses, damages
- and costs (collectively "Losses") arising from claims, lawsuits and
- other legal actions brought by a third party against the Indemnified
- Contributor to the extent caused by the acts or omissions of such
- Commercial Contributor in connection with its distribution of the
- Program in a commercial product offering. The obligations in this
- section do not apply to any claims or Losses relating to any actual
- or alleged intellectual property infringement. In order to qualify,
- an Indemnified Contributor must: a) promptly notify the Commercial
- Contributor in writing of such claim, and b) allow the Commercial
- Contributor to control, and cooperate with the Commercial Contributor
- in, the defense and any related settlement negotiations. The Indemnified
- Contributor may participate in any such claim at its own expense.
-
- For example, a Contributor might include the Program in a commercial
- product offering, Product X. That Contributor is then a Commercial
- Contributor. If that Commercial Contributor then makes performance
- claims, or offers warranties related to Product X, those performance
- claims and warranties are such Commercial Contributor's responsibility
- alone. Under this section, the Commercial Contributor would have to
- defend claims against the other Contributors related to those
- performance claims and warranties, and if a court requires any other
- Contributor to pay any damages as a result, the Commercial Contributor
- must pay those damages.
-
- 5. NO WARRANTY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED
- ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
- EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR
- CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR
- A PARTICULAR PURPOSE. Each Recipient is solely responsible for
- determining the appropriateness of using and distributing the Program
- and assumes all risks associated with its exercise of rights under this
- Agreement, including but not limited to the risks and costs of program
- errors, compliance with applicable laws, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- 6. DISCLAIMER OF LIABILITY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR
- ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING
- WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF
- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR
- DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
- 7. GENERAL
-
- If any provision of this Agreement is invalid or unenforceable under
- applicable law, it shall not affect the validity or enforceability of
- the remainder of the terms of this Agreement, and without further
- action by the parties hereto, such provision shall be reformed to the
- minimum extent necessary to make such provision valid and enforceable.
-
- If Recipient institutes patent litigation against a Contributor with
- respect to a patent applicable to software (including a cross-claim or
- counterclaim in a lawsuit), then any patent licenses granted by that
- Contributor to such Recipient under this Agreement shall terminate as of
- the date such litigation is filed. In addition, if Recipient institutes
- patent litigation against any entity (including a cross-claim or
- counterclaim in a lawsuit) alleging that the Program itself (excluding
- combinations of the Program with other software or hardware) infringes
- such Recipient's patent(s), then such Recipient's rights granted under
- Section 2(b) shall terminate as of the date such litigation is filed.
-
- All Recipient's rights under this Agreement shall terminate if it fails
- to comply with any of the material terms or conditions of this Agreement
- and does not cure such failure in a reasonable period of time after
- becoming aware of such noncompliance. If all Recipient's rights under
- this Agreement terminate, Recipient agrees to cease use and distribution
- of the Program as soon as reasonably practicable. However, Recipient's
- obligations under this Agreement and any licenses granted by Recipient
- relating to the Program shall continue and survive.
-
- Everyone is permitted to copy and distribute copies of this Agreement,
- but in order to avoid inconsistency the Agreement is copyrighted and may
- only be modified in the following manner. The Agreement Steward reserves
- the right to publish new versions (including revisions) of this Agreement
- from time to time. No one other than the Agreement Steward has the right
- to modify this Agreement. IBM is the initial Agreement Steward. IBM may
- assign the responsibility to serve as the Agreement Steward to a suitable
- separate entity. Each new version of the Agreement will be given a
- distinguishing version number. The Program (including Contributions) may
- always be distributed subject to the version of the Agreement under which
- it was received. In addition, after a new version of the Agreement is
- published, Contributor may elect to distribute the Program (including
- its Contributions) under the new version. Except as expressly stated in
- Sections 2(a) and 2(b) above, Recipient receives no rights or licenses
- to the intellectual property of any Contributor under this Agreement,
- whether expressly, by implication, estoppel or otherwise. All rights in
- the Program not expressly granted under this Agreement are reserved.
-
- This Agreement is governed by the laws of the State of New York and the
- intellectual property laws of the United States of America. No party to
- this Agreement will bring a legal action under this Agreement more than
- one year after the cause of action arose. Each party waives its rights
- to a jury trial in any resulting litigation.
diff --git a/plugins/mapper-attachments/licenses/poi-NOTICE.txt b/plugins/mapper-attachments/licenses/poi-NOTICE.txt
deleted file mode 100644
index 12ff265290..0000000000
--- a/plugins/mapper-attachments/licenses/poi-NOTICE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Apache POI
-Copyright 2003-2015 The Apache Software Foundation
-
-This product includes software developed by
-The Apache Software Foundation (http://www.apache.org/).
-
-This product contains parts that were originally based on software from BEA.
-Copyright (c) 2000-2003, BEA Systems, <http://www.bea.com/>.
-
-This product contains W3C XML Schema documents. Copyright 2001-2003 (c)
-World Wide Web Consortium (Massachusetts Institute of Technology, European
-Research Consortium for Informatics and Mathematics, Keio University)
-
-This product contains the Piccolo XML Parser for Java
-(http://piccolo.sourceforge.net/). Copyright 2002 Yuval Oren.
-
-This product contains the chunks_parse_cmds.tbl file from the vsdump program.
-Copyright (C) 2006-2007 Valek Filippov (frob@df.ru)
-
-This product contains parts of the eID Applet project
-(http://eid-applet.googlecode.com). Copyright (c) 2009-2014
-FedICT (federal ICT department of Belgium), e-Contract.be BVBA (https://www.e-contract.be),
-Bart Hanssens from FedICT
diff --git a/plugins/mapper-attachments/licenses/poi-ooxml-3.15-beta1.jar.sha1 b/plugins/mapper-attachments/licenses/poi-ooxml-3.15-beta1.jar.sha1
deleted file mode 100644
index c3cf49d924..0000000000
--- a/plugins/mapper-attachments/licenses/poi-ooxml-3.15-beta1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-81085a47fdf0d74d473d605c6b3784e26731842e
\ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/poi-ooxml-LICENSE.txt b/plugins/mapper-attachments/licenses/poi-ooxml-LICENSE.txt
deleted file mode 100644
index dd2cbd5fbc..0000000000
--- a/plugins/mapper-attachments/licenses/poi-ooxml-LICENSE.txt
+++ /dev/null
@@ -1,463 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-APACHE POI SUBCOMPONENTS:
-
-Apache POI includes subcomponents with separate copyright notices and
-license terms. Your use of these subcomponents is subject to the terms
-and conditions of the following licenses:
-
-
-Office Open XML schemas (ooxml-schemas-1.1.jar)
-
- The Office Open XML schema definitions used by Apache POI are
- a part of the Office Open XML ECMA Specification (ECMA-376, [1]).
- As defined in section 9.4 of the ECMA bylaws [2], this specification
- is available to all interested parties without restriction:
-
- 9.4 All documents when approved shall be made available to
- all interested parties without restriction.
-
- Furthermore, both Microsoft and Adobe have granted patent licenses
- to this work [3,4,5].
-
- [1] http://www.ecma-international.org/publications/standards/Ecma-376.htm
- [2] http://www.ecma-international.org/memento/Ecmabylaws.htm
- [3] http://www.microsoft.com/openspecifications/en/us/programs/osp/default.aspx
- [4] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Edition%202%20Microsoft%20Patent%20Declaration.pdf
- [5] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Adobe%20Patent%20Declaration.pdf
-
-
-JUnit test library (junit-4.11.jar)
-
- Common Public License - v 1.0
-
- THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON
- PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
- OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
- 1. DEFINITIONS
-
- "Contribution" means:
-
- a) in the case of the initial Contributor, the initial code and
- documentation distributed under this Agreement, and
-
- b) in the case of each subsequent Contributor:
-
- i) changes to the Program, and
-
- ii) additions to the Program;
-
- where such changes and/or additions to the Program originate from
- and are distributed by that particular Contributor. A Contribution
- 'originates' from a Contributor if it was added to the Program by
- such Contributor itself or anyone acting on such Contributor's behalf.
- Contributions do not include additions to the Program which: (i) are
- separate modules of software distributed in conjunction with the
- Program under their own license agreement, and (ii) are not derivative
- works of the Program.
-
- "Contributor" means any person or entity that distributes the Program.
-
- "Licensed Patents " mean patent claims licensable by a Contributor which
- are necessarily infringed by the use or sale of its Contribution alone
- or when combined with the Program.
-
- "Program" means the Contributions distributed in accordance with this
- Agreement.
-
- "Recipient" means anyone who receives the Program under this Agreement,
- including all Contributors.
-
- 2. GRANT OF RIGHTS
-
- a) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free copyright license
- to reproduce, prepare derivative works of, publicly display, publicly
- perform, distribute and sublicense the Contribution of such
- Contributor, if any, and such derivative works, in source code and
- object code form.
-
- b) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free patent license under
- Licensed Patents to make, use, sell, offer to sell, import and
- otherwise transfer the Contribution of such Contributor, if any, in
- source code and object code form. This patent license shall apply to
- the combination of the Contribution and the Program if, at the time
- the Contribution is added by the Contributor, such addition of the
- Contribution causes such combination to be covered by the Licensed
- Patents. The patent license shall not apply to any other combinations
- which include the Contribution. No hardware per se is licensed
- hereunder.
-
- c) Recipient understands that although each Contributor grants the
- licenses to its Contributions set forth herein, no assurances are
- provided by any Contributor that the Program does not infringe the
- patent or other intellectual property rights of any other entity.
- Each Contributor disclaims any liability to Recipient for claims
- brought by any other entity based on infringement of intellectual
- property rights or otherwise. As a condition to exercising the rights
- and licenses granted hereunder, each Recipient hereby assumes sole
- responsibility to secure any other intellectual property rights
- needed, if any. For example, if a third party patent license is
- required to allow Recipient to distribute the Program, it is
- Recipient's responsibility to acquire that license before
- distributing the Program.
-
- d) Each Contributor represents that to its knowledge it has sufficient
- copyright rights in its Contribution, if any, to grant the copyright
- license set forth in this Agreement.
-
- 3. REQUIREMENTS
-
- A Contributor may choose to distribute the Program in object code form
- under its own license agreement, provided that:
-
- a) it complies with the terms and conditions of this Agreement; and
-
- b) its license agreement:
-
- i) effectively disclaims on behalf of all Contributors all warranties
- and conditions, express and implied, including warranties or
- conditions of title and non-infringement, and implied warranties
- or conditions of merchantability and fitness for a particular
- purpose;
-
- ii) effectively excludes on behalf of all Contributors all liability
- for damages, including direct, indirect, special, incidental and
- consequential damages, such as lost profits;
-
- iii) states that any provisions which differ from this Agreement are
- offered by that Contributor alone and not by any other party; and
-
- iv) states that source code for the Program is available from such
- Contributor, and informs licensees how to obtain it in a
- reasonable manner on or through a medium customarily used for
- software exchange.
-
- When the Program is made available in source code form:
-
- a) it must be made available under this Agreement; and
-
- b) a copy of this Agreement must be included with each copy of
- the Program.
-
- Contributors may not remove or alter any copyright notices contained
- within the Program.
-
- Each Contributor must identify itself as the originator of its
- Contribution, if any, in a manner that reasonably allows subsequent
- Recipients to identify the originator of the Contribution.
-
- 4. COMMERCIAL DISTRIBUTION
-
- Commercial distributors of software may accept certain responsibilities
- with respect to end users, business partners and the like. While this
- license is intended to facilitate the commercial use of the Program,
- the Contributor who includes the Program in a commercial product offering
- should do so in a manner which does not create potential liability for
- other Contributors. Therefore, if a Contributor includes the Program
- in a commercial product offering, such Contributor ("Commercial
- Contributor") hereby agrees to defend and indemnify every other
- Contributor ("Indemnified Contributor") against any losses, damages
- and costs (collectively "Losses") arising from claims, lawsuits and
- other legal actions brought by a third party against the Indemnified
- Contributor to the extent caused by the acts or omissions of such
- Commercial Contributor in connection with its distribution of the
- Program in a commercial product offering. The obligations in this
- section do not apply to any claims or Losses relating to any actual
- or alleged intellectual property infringement. In order to qualify,
- an Indemnified Contributor must: a) promptly notify the Commercial
- Contributor in writing of such claim, and b) allow the Commercial
- Contributor to control, and cooperate with the Commercial Contributor
- in, the defense and any related settlement negotiations. The Indemnified
- Contributor may participate in any such claim at its own expense.
-
- For example, a Contributor might include the Program in a commercial
- product offering, Product X. That Contributor is then a Commercial
- Contributor. If that Commercial Contributor then makes performance
- claims, or offers warranties related to Product X, those performance
- claims and warranties are such Commercial Contributor's responsibility
- alone. Under this section, the Commercial Contributor would have to
- defend claims against the other Contributors related to those
- performance claims and warranties, and if a court requires any other
- Contributor to pay any damages as a result, the Commercial Contributor
- must pay those damages.
-
- 5. NO WARRANTY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED
- ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
- EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR
- CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR
- A PARTICULAR PURPOSE. Each Recipient is solely responsible for
- determining the appropriateness of using and distributing the Program
- and assumes all risks associated with its exercise of rights under this
- Agreement, including but not limited to the risks and costs of program
- errors, compliance with applicable laws, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- 6. DISCLAIMER OF LIABILITY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR
- ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING
- WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF
- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR
- DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
- 7. GENERAL
-
- If any provision of this Agreement is invalid or unenforceable under
- applicable law, it shall not affect the validity or enforceability of
- the remainder of the terms of this Agreement, and without further
- action by the parties hereto, such provision shall be reformed to the
- minimum extent necessary to make such provision valid and enforceable.
-
- If Recipient institutes patent litigation against a Contributor with
- respect to a patent applicable to software (including a cross-claim or
- counterclaim in a lawsuit), then any patent licenses granted by that
- Contributor to such Recipient under this Agreement shall terminate as of
- the date such litigation is filed. In addition, if Recipient institutes
- patent litigation against any entity (including a cross-claim or
- counterclaim in a lawsuit) alleging that the Program itself (excluding
- combinations of the Program with other software or hardware) infringes
- such Recipient's patent(s), then such Recipient's rights granted under
- Section 2(b) shall terminate as of the date such litigation is filed.
-
- All Recipient's rights under this Agreement shall terminate if it fails
- to comply with any of the material terms or conditions of this Agreement
- and does not cure such failure in a reasonable period of time after
- becoming aware of such noncompliance. If all Recipient's rights under
- this Agreement terminate, Recipient agrees to cease use and distribution
- of the Program as soon as reasonably practicable. However, Recipient's
- obligations under this Agreement and any licenses granted by Recipient
- relating to the Program shall continue and survive.
-
- Everyone is permitted to copy and distribute copies of this Agreement,
- but in order to avoid inconsistency the Agreement is copyrighted and may
- only be modified in the following manner. The Agreement Steward reserves
- the right to publish new versions (including revisions) of this Agreement
- from time to time. No one other than the Agreement Steward has the right
- to modify this Agreement. IBM is the initial Agreement Steward. IBM may
- assign the responsibility to serve as the Agreement Steward to a suitable
- separate entity. Each new version of the Agreement will be given a
- distinguishing version number. The Program (including Contributions) may
- always be distributed subject to the version of the Agreement under which
- it was received. In addition, after a new version of the Agreement is
- published, Contributor may elect to distribute the Program (including
- its Contributions) under the new version. Except as expressly stated in
- Sections 2(a) and 2(b) above, Recipient receives no rights or licenses
- to the intellectual property of any Contributor under this Agreement,
- whether expressly, by implication, estoppel or otherwise. All rights in
- the Program not expressly granted under this Agreement are reserved.
-
- This Agreement is governed by the laws of the State of New York and the
- intellectual property laws of the United States of America. No party to
- this Agreement will bring a legal action under this Agreement more than
- one year after the cause of action arose. Each party waives its rights
- to a jury trial in any resulting litigation.
diff --git a/plugins/mapper-attachments/licenses/poi-ooxml-NOTICE.txt b/plugins/mapper-attachments/licenses/poi-ooxml-NOTICE.txt
deleted file mode 100644
index 12ff265290..0000000000
--- a/plugins/mapper-attachments/licenses/poi-ooxml-NOTICE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Apache POI
-Copyright 2003-2015 The Apache Software Foundation
-
-This product includes software developed by
-The Apache Software Foundation (http://www.apache.org/).
-
-This product contains parts that were originally based on software from BEA.
-Copyright (c) 2000-2003, BEA Systems, <http://www.bea.com/>.
-
-This product contains W3C XML Schema documents. Copyright 2001-2003 (c)
-World Wide Web Consortium (Massachusetts Institute of Technology, European
-Research Consortium for Informatics and Mathematics, Keio University)
-
-This product contains the Piccolo XML Parser for Java
-(http://piccolo.sourceforge.net/). Copyright 2002 Yuval Oren.
-
-This product contains the chunks_parse_cmds.tbl file from the vsdump program.
-Copyright (C) 2006-2007 Valek Filippov (frob@df.ru)
-
-This product contains parts of the eID Applet project
-(http://eid-applet.googlecode.com). Copyright (c) 2009-2014
-FedICT (federal ICT department of Belgium), e-Contract.be BVBA (https://www.e-contract.be),
-Bart Hanssens from FedICT
diff --git a/plugins/mapper-attachments/licenses/poi-ooxml-schemas-3.15-beta1.jar.sha1 b/plugins/mapper-attachments/licenses/poi-ooxml-schemas-3.15-beta1.jar.sha1
deleted file mode 100644
index afd3b676d0..0000000000
--- a/plugins/mapper-attachments/licenses/poi-ooxml-schemas-3.15-beta1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f8bc979ad79908a99483337f1ca2edf78558ac20
\ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/poi-ooxml-schemas-LICENSE.txt b/plugins/mapper-attachments/licenses/poi-ooxml-schemas-LICENSE.txt
deleted file mode 100644
index dd2cbd5fbc..0000000000
--- a/plugins/mapper-attachments/licenses/poi-ooxml-schemas-LICENSE.txt
+++ /dev/null
@@ -1,463 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-APACHE POI SUBCOMPONENTS:
-
-Apache POI includes subcomponents with separate copyright notices and
-license terms. Your use of these subcomponents is subject to the terms
-and conditions of the following licenses:
-
-
-Office Open XML schemas (ooxml-schemas-1.1.jar)
-
- The Office Open XML schema definitions used by Apache POI are
- a part of the Office Open XML ECMA Specification (ECMA-376, [1]).
- As defined in section 9.4 of the ECMA bylaws [2], this specification
- is available to all interested parties without restriction:
-
- 9.4 All documents when approved shall be made available to
- all interested parties without restriction.
-
- Furthermore, both Microsoft and Adobe have granted patent licenses
- to this work [3,4,5].
-
- [1] http://www.ecma-international.org/publications/standards/Ecma-376.htm
- [2] http://www.ecma-international.org/memento/Ecmabylaws.htm
- [3] http://www.microsoft.com/openspecifications/en/us/programs/osp/default.aspx
- [4] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Edition%202%20Microsoft%20Patent%20Declaration.pdf
- [5] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Adobe%20Patent%20Declaration.pdf
-
-
-JUnit test library (junit-4.11.jar)
-
- Common Public License - v 1.0
-
- THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON
- PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
- OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
- 1. DEFINITIONS
-
- "Contribution" means:
-
- a) in the case of the initial Contributor, the initial code and
- documentation distributed under this Agreement, and
-
- b) in the case of each subsequent Contributor:
-
- i) changes to the Program, and
-
- ii) additions to the Program;
-
- where such changes and/or additions to the Program originate from
- and are distributed by that particular Contributor. A Contribution
- 'originates' from a Contributor if it was added to the Program by
- such Contributor itself or anyone acting on such Contributor's behalf.
- Contributions do not include additions to the Program which: (i) are
- separate modules of software distributed in conjunction with the
- Program under their own license agreement, and (ii) are not derivative
- works of the Program.
-
- "Contributor" means any person or entity that distributes the Program.
-
- "Licensed Patents " mean patent claims licensable by a Contributor which
- are necessarily infringed by the use or sale of its Contribution alone
- or when combined with the Program.
-
- "Program" means the Contributions distributed in accordance with this
- Agreement.
-
- "Recipient" means anyone who receives the Program under this Agreement,
- including all Contributors.
-
- 2. GRANT OF RIGHTS
-
- a) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free copyright license
- to reproduce, prepare derivative works of, publicly display, publicly
- perform, distribute and sublicense the Contribution of such
- Contributor, if any, and such derivative works, in source code and
- object code form.
-
- b) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free patent license under
- Licensed Patents to make, use, sell, offer to sell, import and
- otherwise transfer the Contribution of such Contributor, if any, in
- source code and object code form. This patent license shall apply to
- the combination of the Contribution and the Program if, at the time
- the Contribution is added by the Contributor, such addition of the
- Contribution causes such combination to be covered by the Licensed
- Patents. The patent license shall not apply to any other combinations
- which include the Contribution. No hardware per se is licensed
- hereunder.
-
- c) Recipient understands that although each Contributor grants the
- licenses to its Contributions set forth herein, no assurances are
- provided by any Contributor that the Program does not infringe the
- patent or other intellectual property rights of any other entity.
- Each Contributor disclaims any liability to Recipient for claims
- brought by any other entity based on infringement of intellectual
- property rights or otherwise. As a condition to exercising the rights
- and licenses granted hereunder, each Recipient hereby assumes sole
- responsibility to secure any other intellectual property rights
- needed, if any. For example, if a third party patent license is
- required to allow Recipient to distribute the Program, it is
- Recipient's responsibility to acquire that license before
- distributing the Program.
-
- d) Each Contributor represents that to its knowledge it has sufficient
- copyright rights in its Contribution, if any, to grant the copyright
- license set forth in this Agreement.
-
- 3. REQUIREMENTS
-
- A Contributor may choose to distribute the Program in object code form
- under its own license agreement, provided that:
-
- a) it complies with the terms and conditions of this Agreement; and
-
- b) its license agreement:
-
- i) effectively disclaims on behalf of all Contributors all warranties
- and conditions, express and implied, including warranties or
- conditions of title and non-infringement, and implied warranties
- or conditions of merchantability and fitness for a particular
- purpose;
-
- ii) effectively excludes on behalf of all Contributors all liability
- for damages, including direct, indirect, special, incidental and
- consequential damages, such as lost profits;
-
- iii) states that any provisions which differ from this Agreement are
- offered by that Contributor alone and not by any other party; and
-
- iv) states that source code for the Program is available from such
- Contributor, and informs licensees how to obtain it in a
- reasonable manner on or through a medium customarily used for
- software exchange.
-
- When the Program is made available in source code form:
-
- a) it must be made available under this Agreement; and
-
- b) a copy of this Agreement must be included with each copy of
- the Program.
-
- Contributors may not remove or alter any copyright notices contained
- within the Program.
-
- Each Contributor must identify itself as the originator of its
- Contribution, if any, in a manner that reasonably allows subsequent
- Recipients to identify the originator of the Contribution.
-
- 4. COMMERCIAL DISTRIBUTION
-
- Commercial distributors of software may accept certain responsibilities
- with respect to end users, business partners and the like. While this
- license is intended to facilitate the commercial use of the Program,
- the Contributor who includes the Program in a commercial product offering
- should do so in a manner which does not create potential liability for
- other Contributors. Therefore, if a Contributor includes the Program
- in a commercial product offering, such Contributor ("Commercial
- Contributor") hereby agrees to defend and indemnify every other
- Contributor ("Indemnified Contributor") against any losses, damages
- and costs (collectively "Losses") arising from claims, lawsuits and
- other legal actions brought by a third party against the Indemnified
- Contributor to the extent caused by the acts or omissions of such
- Commercial Contributor in connection with its distribution of the
- Program in a commercial product offering. The obligations in this
- section do not apply to any claims or Losses relating to any actual
- or alleged intellectual property infringement. In order to qualify,
- an Indemnified Contributor must: a) promptly notify the Commercial
- Contributor in writing of such claim, and b) allow the Commercial
- Contributor to control, and cooperate with the Commercial Contributor
- in, the defense and any related settlement negotiations. The Indemnified
- Contributor may participate in any such claim at its own expense.
-
- For example, a Contributor might include the Program in a commercial
- product offering, Product X. That Contributor is then a Commercial
- Contributor. If that Commercial Contributor then makes performance
- claims, or offers warranties related to Product X, those performance
- claims and warranties are such Commercial Contributor's responsibility
- alone. Under this section, the Commercial Contributor would have to
- defend claims against the other Contributors related to those
- performance claims and warranties, and if a court requires any other
- Contributor to pay any damages as a result, the Commercial Contributor
- must pay those damages.
-
- 5. NO WARRANTY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED
- ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
- EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR
- CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR
- A PARTICULAR PURPOSE. Each Recipient is solely responsible for
- determining the appropriateness of using and distributing the Program
- and assumes all risks associated with its exercise of rights under this
- Agreement, including but not limited to the risks and costs of program
- errors, compliance with applicable laws, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- 6. DISCLAIMER OF LIABILITY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR
- ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING
- WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF
- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR
- DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
- 7. GENERAL
-
- If any provision of this Agreement is invalid or unenforceable under
- applicable law, it shall not affect the validity or enforceability of
- the remainder of the terms of this Agreement, and without further
- action by the parties hereto, such provision shall be reformed to the
- minimum extent necessary to make such provision valid and enforceable.
-
- If Recipient institutes patent litigation against a Contributor with
- respect to a patent applicable to software (including a cross-claim or
- counterclaim in a lawsuit), then any patent licenses granted by that
- Contributor to such Recipient under this Agreement shall terminate as of
- the date such litigation is filed. In addition, if Recipient institutes
- patent litigation against any entity (including a cross-claim or
- counterclaim in a lawsuit) alleging that the Program itself (excluding
- combinations of the Program with other software or hardware) infringes
- such Recipient's patent(s), then such Recipient's rights granted under
- Section 2(b) shall terminate as of the date such litigation is filed.
-
- All Recipient's rights under this Agreement shall terminate if it fails
- to comply with any of the material terms or conditions of this Agreement
- and does not cure such failure in a reasonable period of time after
- becoming aware of such noncompliance. If all Recipient's rights under
- this Agreement terminate, Recipient agrees to cease use and distribution
- of the Program as soon as reasonably practicable. However, Recipient's
- obligations under this Agreement and any licenses granted by Recipient
- relating to the Program shall continue and survive.
-
- Everyone is permitted to copy and distribute copies of this Agreement,
- but in order to avoid inconsistency the Agreement is copyrighted and may
- only be modified in the following manner. The Agreement Steward reserves
- the right to publish new versions (including revisions) of this Agreement
- from time to time. No one other than the Agreement Steward has the right
- to modify this Agreement. IBM is the initial Agreement Steward. IBM may
- assign the responsibility to serve as the Agreement Steward to a suitable
- separate entity. Each new version of the Agreement will be given a
- distinguishing version number. The Program (including Contributions) may
- always be distributed subject to the version of the Agreement under which
- it was received. In addition, after a new version of the Agreement is
- published, Contributor may elect to distribute the Program (including
- its Contributions) under the new version. Except as expressly stated in
- Sections 2(a) and 2(b) above, Recipient receives no rights or licenses
- to the intellectual property of any Contributor under this Agreement,
- whether expressly, by implication, estoppel or otherwise. All rights in
- the Program not expressly granted under this Agreement are reserved.
-
- This Agreement is governed by the laws of the State of New York and the
- intellectual property laws of the United States of America. No party to
- this Agreement will bring a legal action under this Agreement more than
- one year after the cause of action arose. Each party waives its rights
- to a jury trial in any resulting litigation.
diff --git a/plugins/mapper-attachments/licenses/poi-ooxml-schemas-NOTICE.txt b/plugins/mapper-attachments/licenses/poi-ooxml-schemas-NOTICE.txt
deleted file mode 100644
index 12ff265290..0000000000
--- a/plugins/mapper-attachments/licenses/poi-ooxml-schemas-NOTICE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Apache POI
-Copyright 2003-2015 The Apache Software Foundation
-
-This product includes software developed by
-The Apache Software Foundation (http://www.apache.org/).
-
-This product contains parts that were originally based on software from BEA.
-Copyright (c) 2000-2003, BEA Systems, <http://www.bea.com/>.
-
-This product contains W3C XML Schema documents. Copyright 2001-2003 (c)
-World Wide Web Consortium (Massachusetts Institute of Technology, European
-Research Consortium for Informatics and Mathematics, Keio University)
-
-This product contains the Piccolo XML Parser for Java
-(http://piccolo.sourceforge.net/). Copyright 2002 Yuval Oren.
-
-This product contains the chunks_parse_cmds.tbl file from the vsdump program.
-Copyright (C) 2006-2007 Valek Filippov (frob@df.ru)
-
-This product contains parts of the eID Applet project
-(http://eid-applet.googlecode.com). Copyright (c) 2009-2014
-FedICT (federal ICT department of Belgium), e-Contract.be BVBA (https://www.e-contract.be),
-Bart Hanssens from FedICT
diff --git a/plugins/mapper-attachments/licenses/poi-scratchpad-3.15-beta1.jar.sha1 b/plugins/mapper-attachments/licenses/poi-scratchpad-3.15-beta1.jar.sha1
deleted file mode 100644
index 7056a9fa49..0000000000
--- a/plugins/mapper-attachments/licenses/poi-scratchpad-3.15-beta1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f4e276aaf97a60a1156388c9e38069122b7ea914 \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/poi-scratchpad-LICENSE.txt b/plugins/mapper-attachments/licenses/poi-scratchpad-LICENSE.txt
deleted file mode 100644
index dd2cbd5fbc..0000000000
--- a/plugins/mapper-attachments/licenses/poi-scratchpad-LICENSE.txt
+++ /dev/null
@@ -1,463 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-APACHE POI SUBCOMPONENTS:
-
-Apache POI includes subcomponents with separate copyright notices and
-license terms. Your use of these subcomponents is subject to the terms
-and conditions of the following licenses:
-
-
-Office Open XML schemas (ooxml-schemas-1.1.jar)
-
- The Office Open XML schema definitions used by Apache POI are
- a part of the Office Open XML ECMA Specification (ECMA-376, [1]).
- As defined in section 9.4 of the ECMA bylaws [2], this specification
- is available to all interested parties without restriction:
-
- 9.4 All documents when approved shall be made available to
- all interested parties without restriction.
-
- Furthermore, both Microsoft and Adobe have granted patent licenses
- to this work [3,4,5].
-
- [1] http://www.ecma-international.org/publications/standards/Ecma-376.htm
- [2] http://www.ecma-international.org/memento/Ecmabylaws.htm
- [3] http://www.microsoft.com/openspecifications/en/us/programs/osp/default.aspx
- [4] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Edition%202%20Microsoft%20Patent%20Declaration.pdf
- [5] http://www.ecma-international.org/publications/files/ECMA-ST/Ecma%20PATENT/Patent%20statements%20ok/ECMA-376%20Adobe%20Patent%20Declaration.pdf
-
-
-JUnit test library (junit-4.11.jar)
-
- Common Public License - v 1.0
-
- THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON
- PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
- OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
- 1. DEFINITIONS
-
- "Contribution" means:
-
- a) in the case of the initial Contributor, the initial code and
- documentation distributed under this Agreement, and
-
- b) in the case of each subsequent Contributor:
-
- i) changes to the Program, and
-
- ii) additions to the Program;
-
- where such changes and/or additions to the Program originate from
- and are distributed by that particular Contributor. A Contribution
- 'originates' from a Contributor if it was added to the Program by
- such Contributor itself or anyone acting on such Contributor's behalf.
- Contributions do not include additions to the Program which: (i) are
- separate modules of software distributed in conjunction with the
- Program under their own license agreement, and (ii) are not derivative
- works of the Program.
-
- "Contributor" means any person or entity that distributes the Program.
-
- "Licensed Patents " mean patent claims licensable by a Contributor which
- are necessarily infringed by the use or sale of its Contribution alone
- or when combined with the Program.
-
- "Program" means the Contributions distributed in accordance with this
- Agreement.
-
- "Recipient" means anyone who receives the Program under this Agreement,
- including all Contributors.
-
- 2. GRANT OF RIGHTS
-
- a) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free copyright license
- to reproduce, prepare derivative works of, publicly display, publicly
- perform, distribute and sublicense the Contribution of such
- Contributor, if any, and such derivative works, in source code and
- object code form.
-
- b) Subject to the terms of this Agreement, each Contributor hereby grants
- Recipient a non-exclusive, worldwide, royalty-free patent license under
- Licensed Patents to make, use, sell, offer to sell, import and
- otherwise transfer the Contribution of such Contributor, if any, in
- source code and object code form. This patent license shall apply to
- the combination of the Contribution and the Program if, at the time
- the Contribution is added by the Contributor, such addition of the
- Contribution causes such combination to be covered by the Licensed
- Patents. The patent license shall not apply to any other combinations
- which include the Contribution. No hardware per se is licensed
- hereunder.
-
- c) Recipient understands that although each Contributor grants the
- licenses to its Contributions set forth herein, no assurances are
- provided by any Contributor that the Program does not infringe the
- patent or other intellectual property rights of any other entity.
- Each Contributor disclaims any liability to Recipient for claims
- brought by any other entity based on infringement of intellectual
- property rights or otherwise. As a condition to exercising the rights
- and licenses granted hereunder, each Recipient hereby assumes sole
- responsibility to secure any other intellectual property rights
- needed, if any. For example, if a third party patent license is
- required to allow Recipient to distribute the Program, it is
- Recipient's responsibility to acquire that license before
- distributing the Program.
-
- d) Each Contributor represents that to its knowledge it has sufficient
- copyright rights in its Contribution, if any, to grant the copyright
- license set forth in this Agreement.
-
- 3. REQUIREMENTS
-
- A Contributor may choose to distribute the Program in object code form
- under its own license agreement, provided that:
-
- a) it complies with the terms and conditions of this Agreement; and
-
- b) its license agreement:
-
- i) effectively disclaims on behalf of all Contributors all warranties
- and conditions, express and implied, including warranties or
- conditions of title and non-infringement, and implied warranties
- or conditions of merchantability and fitness for a particular
- purpose;
-
- ii) effectively excludes on behalf of all Contributors all liability
- for damages, including direct, indirect, special, incidental and
- consequential damages, such as lost profits;
-
- iii) states that any provisions which differ from this Agreement are
- offered by that Contributor alone and not by any other party; and
-
- iv) states that source code for the Program is available from such
- Contributor, and informs licensees how to obtain it in a
- reasonable manner on or through a medium customarily used for
- software exchange.
-
- When the Program is made available in source code form:
-
- a) it must be made available under this Agreement; and
-
- b) a copy of this Agreement must be included with each copy of
- the Program.
-
- Contributors may not remove or alter any copyright notices contained
- within the Program.
-
- Each Contributor must identify itself as the originator of its
- Contribution, if any, in a manner that reasonably allows subsequent
- Recipients to identify the originator of the Contribution.
-
- 4. COMMERCIAL DISTRIBUTION
-
- Commercial distributors of software may accept certain responsibilities
- with respect to end users, business partners and the like. While this
- license is intended to facilitate the commercial use of the Program,
- the Contributor who includes the Program in a commercial product offering
- should do so in a manner which does not create potential liability for
- other Contributors. Therefore, if a Contributor includes the Program
- in a commercial product offering, such Contributor ("Commercial
- Contributor") hereby agrees to defend and indemnify every other
- Contributor ("Indemnified Contributor") against any losses, damages
- and costs (collectively "Losses") arising from claims, lawsuits and
- other legal actions brought by a third party against the Indemnified
- Contributor to the extent caused by the acts or omissions of such
- Commercial Contributor in connection with its distribution of the
- Program in a commercial product offering. The obligations in this
- section do not apply to any claims or Losses relating to any actual
- or alleged intellectual property infringement. In order to qualify,
- an Indemnified Contributor must: a) promptly notify the Commercial
- Contributor in writing of such claim, and b) allow the Commercial
- Contributor to control, and cooperate with the Commercial Contributor
- in, the defense and any related settlement negotiations. The Indemnified
- Contributor may participate in any such claim at its own expense.
-
- For example, a Contributor might include the Program in a commercial
- product offering, Product X. That Contributor is then a Commercial
- Contributor. If that Commercial Contributor then makes performance
- claims, or offers warranties related to Product X, those performance
- claims and warranties are such Commercial Contributor's responsibility
- alone. Under this section, the Commercial Contributor would have to
- defend claims against the other Contributors related to those
- performance claims and warranties, and if a court requires any other
- Contributor to pay any damages as a result, the Commercial Contributor
- must pay those damages.
-
- 5. NO WARRANTY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED
- ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
- EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR
- CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR
- A PARTICULAR PURPOSE. Each Recipient is solely responsible for
- determining the appropriateness of using and distributing the Program
- and assumes all risks associated with its exercise of rights under this
- Agreement, including but not limited to the risks and costs of program
- errors, compliance with applicable laws, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- 6. DISCLAIMER OF LIABILITY
-
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR
- ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING
- WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF
- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR
- DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
- 7. GENERAL
-
- If any provision of this Agreement is invalid or unenforceable under
- applicable law, it shall not affect the validity or enforceability of
- the remainder of the terms of this Agreement, and without further
- action by the parties hereto, such provision shall be reformed to the
- minimum extent necessary to make such provision valid and enforceable.
-
- If Recipient institutes patent litigation against a Contributor with
- respect to a patent applicable to software (including a cross-claim or
- counterclaim in a lawsuit), then any patent licenses granted by that
- Contributor to such Recipient under this Agreement shall terminate as of
- the date such litigation is filed. In addition, if Recipient institutes
- patent litigation against any entity (including a cross-claim or
- counterclaim in a lawsuit) alleging that the Program itself (excluding
- combinations of the Program with other software or hardware) infringes
- such Recipient's patent(s), then such Recipient's rights granted under
- Section 2(b) shall terminate as of the date such litigation is filed.
-
- All Recipient's rights under this Agreement shall terminate if it fails
- to comply with any of the material terms or conditions of this Agreement
- and does not cure such failure in a reasonable period of time after
- becoming aware of such noncompliance. If all Recipient's rights under
- this Agreement terminate, Recipient agrees to cease use and distribution
- of the Program as soon as reasonably practicable. However, Recipient's
- obligations under this Agreement and any licenses granted by Recipient
- relating to the Program shall continue and survive.
-
- Everyone is permitted to copy and distribute copies of this Agreement,
- but in order to avoid inconsistency the Agreement is copyrighted and may
- only be modified in the following manner. The Agreement Steward reserves
- the right to publish new versions (including revisions) of this Agreement
- from time to time. No one other than the Agreement Steward has the right
- to modify this Agreement. IBM is the initial Agreement Steward. IBM may
- assign the responsibility to serve as the Agreement Steward to a suitable
- separate entity. Each new version of the Agreement will be given a
- distinguishing version number. The Program (including Contributions) may
- always be distributed subject to the version of the Agreement under which
- it was received. In addition, after a new version of the Agreement is
- published, Contributor may elect to distribute the Program (including
- its Contributions) under the new version. Except as expressly stated in
- Sections 2(a) and 2(b) above, Recipient receives no rights or licenses
- to the intellectual property of any Contributor under this Agreement,
- whether expressly, by implication, estoppel or otherwise. All rights in
- the Program not expressly granted under this Agreement are reserved.
-
- This Agreement is governed by the laws of the State of New York and the
- intellectual property laws of the United States of America. No party to
- this Agreement will bring a legal action under this Agreement more than
- one year after the cause of action arose. Each party waives its rights
- to a jury trial in any resulting litigation.
diff --git a/plugins/mapper-attachments/licenses/poi-scratchpad-NOTICE.txt b/plugins/mapper-attachments/licenses/poi-scratchpad-NOTICE.txt
deleted file mode 100644
index 12ff265290..0000000000
--- a/plugins/mapper-attachments/licenses/poi-scratchpad-NOTICE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Apache POI
-Copyright 2003-2015 The Apache Software Foundation
-
-This product includes software developed by
-The Apache Software Foundation (http://www.apache.org/).
-
-This product contains parts that were originally based on software from BEA.
-Copyright (c) 2000-2003, BEA Systems, <http://www.bea.com/>.
-
-This product contains W3C XML Schema documents. Copyright 2001-2003 (c)
-World Wide Web Consortium (Massachusetts Institute of Technology, European
-Research Consortium for Informatics and Mathematics, Keio University)
-
-This product contains the Piccolo XML Parser for Java
-(http://piccolo.sourceforge.net/). Copyright 2002 Yuval Oren.
-
-This product contains the chunks_parse_cmds.tbl file from the vsdump program.
-Copyright (C) 2006-2007 Valek Filippov (frob@df.ru)
-
-This product contains parts of the eID Applet project
-(http://eid-applet.googlecode.com). Copyright (c) 2009-2014
-FedICT (federal ICT department of Belgium), e-Contract.be BVBA (https://www.e-contract.be),
-Bart Hanssens from FedICT
diff --git a/plugins/mapper-attachments/licenses/tagsoup-1.2.1.jar.sha1 b/plugins/mapper-attachments/licenses/tagsoup-1.2.1.jar.sha1
deleted file mode 100644
index 5d227b11a0..0000000000
--- a/plugins/mapper-attachments/licenses/tagsoup-1.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5584627487e984c03456266d3f8802eb85a9ce97
diff --git a/plugins/mapper-attachments/licenses/tagsoup-LICENSE.txt b/plugins/mapper-attachments/licenses/tagsoup-LICENSE.txt
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/plugins/mapper-attachments/licenses/tagsoup-LICENSE.txt
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/plugins/mapper-attachments/licenses/tagsoup-NOTICE.txt b/plugins/mapper-attachments/licenses/tagsoup-NOTICE.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/plugins/mapper-attachments/licenses/tagsoup-NOTICE.txt
+++ /dev/null
diff --git a/plugins/mapper-attachments/licenses/tika-core-1.13.jar.sha1 b/plugins/mapper-attachments/licenses/tika-core-1.13.jar.sha1
deleted file mode 100644
index cfc36a450b..0000000000
--- a/plugins/mapper-attachments/licenses/tika-core-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1305c798d41d1d7bbf12cb7c0ca184c98eed25ad \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/tika-core-LICENSE.txt b/plugins/mapper-attachments/licenses/tika-core-LICENSE.txt
deleted file mode 100644
index 9537d733ea..0000000000
--- a/plugins/mapper-attachments/licenses/tika-core-LICENSE.txt
+++ /dev/null
@@ -1,372 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-
-APACHE TIKA SUBCOMPONENTS
-
-Apache Tika includes a number of subcomponents with separate copyright notices
-and license terms. Your use of these subcomponents is subject to the terms and
-conditions of the following licenses.
-
-MIME type information from file-4.26.tar.gz (http://www.darwinsys.com/file/)
-
- Copyright (c) Ian F. Darwin 1986, 1987, 1989, 1990, 1991, 1992, 1994, 1995.
- Software written by Ian F. Darwin and others;
- maintained 1994- Christos Zoulas.
-
- This software is not subject to any export provision of the United States
- Department of Commerce, and may be exported to any country or planet.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
- notice immediately at the beginning of the file, without modification,
- this list of conditions, and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- SUCH DAMAGE.
-
-Charset detection code from ICU4J (http://site.icu-project.org/)
-
- Copyright (c) 1995-2009 International Business Machines Corporation
- and others
-
- All rights reserved.
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, and/or sell copies of the Software, and to permit persons
- to whom the Software is furnished to do so, provided that the above
- copyright notice(s) and this permission notice appear in all copies
- of the Software and that both the above copyright notice(s) and this
- permission notice appear in supporting documentation.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
- IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE
- BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES,
- OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
- WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
- ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- SOFTWARE.
-
- Except as contained in this notice, the name of a copyright holder shall
- not be used in advertising or otherwise to promote the sale, use or other
- dealings in this Software without prior written authorization of the
- copyright holder.
-
-
-Parsing functionality provided by the NetCDF Java Library (http://www.unidata.ucar.edu/software/netcdf-java/)
-
- Copyright 1993-2010 University Corporation for Atmospheric Research/Unidata
-
- Portions of this software were developed by the Unidata Program at the University
- Corporation for Atmospheric Research.
-
- Access and use of this software shall impose the following obligations and understandings
- on the user. The user is granted the right, without any fee or cost, to use, copy, modify,
- alter, enhance and distribute this software, and any derivative works thereof, and its
- supporting documentation for any purpose whatsoever, provided that this entire notice
- appears in all copies of the software, derivative works and supporting documentation. Further,
- UCAR requests that the user credit UCAR/Unidata in any publications that result from the use
- of this software or in any product that includes this software, although this is not an obligation.
- The names UCAR and/or Unidata, however, may not be used in any advertising or publicity to endorse
- or promote any products or commercial entity unless specific written permission is obtained from
- UCAR/Unidata. The user also understands that UCAR/Unidata is not obligated to provide the user with
- any support, consulting, training or assistance of any kind with regard to the use, operation and
- performance of this software nor to provide the user with any updates, revisions, new versions or
- "bug fixes."
-
- THIS SOFTWARE IS PROVIDED BY UCAR/UNIDATA "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
- BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL UCAR/UNIDATA BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
- DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE ACCESS,
- USE OR PERFORMANCE OF THIS SOFTWARE.
-
-
-IPTC Photo Metadata descriptions are taken from the IPTC Photo Metadata
-Standard, July 2010, Copyright 2010 International Press Telecommunications
-Council.
-
- 1. The Specifications and Materials are licensed for use only on the condition that you agree to be bound by the terms of this license. Subject to this and other licensing requirements contained herein, you may, on a non-exclusive basis, use the Specifications and Materials.
- 2. The IPTC openly provides the Specifications and Materials for voluntary use by individuals, partnerships, companies, corporations, organizations and any other entity for use at the entity's own risk. This disclaimer, license and release is intended to apply to the IPTC, its officers, directors, agents, representatives, members, contributors, affiliates, contractors, or co-venturers acting jointly or severally.
- 3. The Document and translations thereof may be copied and furnished to others, and derivative works that comment on or otherwise explain it or assist in its implementation may be prepared, copied, published and distributed, in whole or in part, without restriction of any kind, provided that the copyright and license notices and references to the IPTC appearing in the Document and the terms of this Specifications License Agreement are included on all such copies and derivative works. Further, upon the receipt of written permission from the IPTC, the Document may be modified for the purpose of developing applications that use IPTC Specifications or as required to translate the Document into languages other than English.
- 4. Any use, duplication, distribution, or exploitation of the Document and Specifications and Materials in any manner is at your own risk.
- 5. NO WARRANTY, EXPRESSED OR IMPLIED, IS MADE REGARDING THE ACCURACY, ADEQUACY, COMPLETENESS, LEGALITY, RELIABILITY OR USEFULNESS OF ANY INFORMATION CONTAINED IN THE DOCUMENT OR IN ANY SPECIFICATION OR OTHER PRODUCT OR SERVICE PRODUCED OR SPONSORED BY THE IPTC. THE DOCUMENT AND THE INFORMATION CONTAINED HEREIN AND INCLUDED IN ANY SPECIFICATION OR OTHER PRODUCT OR SERVICE OF THE IPTC IS PROVIDED ON AN "AS IS" BASIS. THE IPTC DISCLAIMS ALL WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY ACTUAL OR ASSERTED WARRANTY OF NON-INFRINGEMENT OF PROPRIETARY RIGHTS, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE IPTC NOR ITS CONTRIBUTORS SHALL BE HELD LIABLE FOR ANY IMPROPER OR INCORRECT USE OF INFORMATION. NEITHER THE IPTC NOR ITS CONTRIBUTORS ASSUME ANY RESPONSIBILITY FOR ANYONE'S USE OF INFORMATION PROVIDED BY THE IPTC. IN NO EVENT SHALL THE IPTC OR ITS CONTRIBUTORS BE LIABLE TO ANYONE FOR DAMAGES OF ANY KIND, INCLUDING BUT NOT LIMITED TO, COMPENSATORY DAMAGES, LOST PROFITS, LOST DATA OR ANY FORM OF SPECIAL, INCIDENTAL, INDIRECT, CONSEQUENTIAL OR PUNITIVE DAMAGES OF ANY KIND WHETHER BASED ON BREACH OF CONTRACT OR WARRANTY, TORT, PRODUCT LIABILITY OR OTHERWISE.
- 6. The IPTC takes no position regarding the validity or scope of any Intellectual Property or other rights that might be claimed to pertain to the implementation or use of the technology described in the Document or the extent to which any license under such rights might or might not be available. The IPTC does not represent that it has made any effort to identify any such rights. Copies of claims of rights made available for publication, assurances of licenses to be made available, or the result of an attempt made to obtain a general license or permission for the use of such proprietary rights by implementers or users of the Specifications and Materials, can be obtained from the Managing Director of the IPTC.
- 7. By using the Specifications and Materials including the Document in any manner or for any purpose, you release the IPTC from all liabilities, claims, causes of action, allegations, losses, injuries, damages, or detriments of any nature arising from or relating to the use of the Specifications, Materials or any portion thereof. You further agree not to file a lawsuit, make a claim, or take any other formal or informal legal action against the IPTC, resulting from your acquisition, use, duplication, distribution, or exploitation of the Specifications, Materials or any portion thereof. Finally, you hereby agree that the IPTC is not liable for any direct, indirect, special or consequential damages arising from or relating to your acquisition, use, duplication, distribution, or exploitation of the Specifications, Materials or any portion thereof.
- 8. Specifications and Materials may be downloaded or copied provided that ALL copies retain the ownership, copyright and license notices.
- 9. Materials may not be edited, modified, or presented in a context that creates a misleading or false impression or statement as to the positions, actions, or statements of the IPTC.
- 10. The name and trademarks of the IPTC may not be used in advertising, publicity, or in relation to products or services and their names without the specific, written prior permission of the IPTC. Any permitted use of the trademarks of the IPTC, whether registered or not, shall be accompanied by an appropriate mark and attribution, as agreed with the IPTC.
- 11. Specifications may be extended by both members and non-members to provide additional functionality (Extension Specifications) provided that there is a clear recognition of the IPTC IP and its ownership in the Extension Specifications and the related documentation and provided that the extensions are clearly identified and provided that a perpetual license is granted by the creator of the Extension Specifications for other members and non-members to use the Extension Specifications and to continue extensions of the Extension Specifications. The IPTC does not waive any of its rights in the Specifications and Materials in this context. The Extension Specifications may be considered the intellectual property of their creator. The IPTC expressly disclaims any responsibility for damage caused by an extension to the Specifications.
- 12. Specifications and Materials may be included in derivative work of both members and non-members provided that there is a clear recognition of the IPTC IP and its ownership in the derivative work and its related documentation. The IPTC does not waive any of its rights in the Specifications and Materials in this context. Derivative work in its entirety may be considered the intellectual property of the creator of the work. The IPTC expressly disclaims any responsibility for damage caused when its IP is used in a derivative context.
- 13. This Specifications License Agreement is perpetual subject to your conformance to the terms of this Agreement. The IPTC may terminate this Specifications License Agreement immediately upon your breach of this Agreement and, upon such termination you will cease all use, duplication, distribution, and/or exploitation in any manner of the Specifications and Materials.
- 14. This Specifications License Agreement reflects the entire agreement of the parties regarding the subject matter hereof and supersedes all prior agreements or representations regarding such matters, whether written or oral. To the extent any portion or provision of this Specifications License Agreement is found to be illegal or unenforceable, then the remaining provisions of this Specifications License Agreement will remain in full force and effect and the illegal or unenforceable provision will be construed to give it such effect as it may properly have that is consistent with the intentions of the parties.
- 15. This Specifications License Agreement may only be modified in writing signed by an authorized representative of the IPTC.
- 16. This Specifications License Agreement is governed by the law of the United Kingdom, as such law is applied to contracts made and fully performed in the United Kingdom. Any disputes arising from or relating to this Specifications License Agreement will be resolved in the courts of the United Kingdom. You consent to the jurisdiction of such courts over you and covenant not to assert before such courts any objection to proceeding in such forums.
-
-
-JUnRAR (https://github.com/edmund-wagner/junrar/)
-
- JUnRAR is based on the UnRAR tool, and covered by the same license.
- It was formerly available from http://java-unrar.svn.sourceforge.net/
-
- ****** ***** ****** UnRAR - free utility for RAR archives
- ** ** ** ** ** ** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ****** ******* ****** License for use and distribution of
- ** ** ** ** ** ** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ** ** ** ** ** ** FREE portable version
- ~~~~~~~~~~~~~~~~~~~~~
-
- The source code of UnRAR utility is freeware. This means:
-
- 1. All copyrights to RAR and the utility UnRAR are exclusively
- owned by the author - Alexander Roshal.
-
- 2. The UnRAR sources may be used in any software to handle RAR
- archives without limitations free of charge, but cannot be used
- to re-create the RAR compression algorithm, which is proprietary.
- Distribution of modified UnRAR sources in separate form or as a
- part of other software is permitted, provided that it is clearly
- stated in the documentation and source comments that the code may
- not be used to develop a RAR (WinRAR) compatible archiver.
-
- 3. The UnRAR utility may be freely distributed. It is allowed
- to distribute UnRAR inside of other software packages.
-
- 4. THE RAR ARCHIVER AND THE UnRAR UTILITY ARE DISTRIBUTED "AS IS".
- NO WARRANTY OF ANY KIND IS EXPRESSED OR IMPLIED. YOU USE AT
- YOUR OWN RISK. THE AUTHOR WILL NOT BE LIABLE FOR DATA LOSS,
- DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING
- OR MISUSING THIS SOFTWARE.
-
- 5. Installing and using the UnRAR utility signifies acceptance of
- these terms and conditions of the license.
-
- 6. If you don't agree with terms of the license you must remove
- UnRAR files from your storage devices and cease to use the
- utility.
-
- Thank you for your interest in RAR and UnRAR. Alexander L. Roshal
-
-Sqlite (bundled in org.xerial's sqlite-jdbc)
- This product bundles Sqlite, which is in the Public Domain. For details
- see: https://www.sqlite.org/copyright.html
diff --git a/plugins/mapper-attachments/licenses/tika-core-NOTICE.txt b/plugins/mapper-attachments/licenses/tika-core-NOTICE.txt
deleted file mode 100644
index 8e94f644b8..0000000000
--- a/plugins/mapper-attachments/licenses/tika-core-NOTICE.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Apache Tika
-Copyright 2015 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Copyright 1993-2010 University Corporation for Atmospheric Research/Unidata
-This software contains code derived from UCAR/Unidata's NetCDF library.
-
-Tika-server component uses CDDL-licensed dependencies: jersey (http://jersey.java.net/) and
-Grizzly (http://grizzly.java.net/)
-
-Tika-parsers component uses CDDL/LGPL dual-licensed dependency: jhighlight (https://github.com/codelibs/jhighlight)
-
-OpenCSV: Copyright 2005 Bytecode Pty Ltd. Licensed under the Apache License, Version 2.0
-
-IPTC Photo Metadata descriptions Copyright 2010 International Press Telecommunications Council.
diff --git a/plugins/mapper-attachments/licenses/tika-parsers-1.13.jar.sha1 b/plugins/mapper-attachments/licenses/tika-parsers-1.13.jar.sha1
deleted file mode 100644
index 7fb2755d54..0000000000
--- a/plugins/mapper-attachments/licenses/tika-parsers-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-374fde67b9d35f785534b0e6c4953533c31bab5f \ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/tika-parsers-LICENSE.txt b/plugins/mapper-attachments/licenses/tika-parsers-LICENSE.txt
deleted file mode 100644
index 9537d733ea..0000000000
--- a/plugins/mapper-attachments/licenses/tika-parsers-LICENSE.txt
+++ /dev/null
@@ -1,372 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-
-APACHE TIKA SUBCOMPONENTS
-
-Apache Tika includes a number of subcomponents with separate copyright notices
-and license terms. Your use of these subcomponents is subject to the terms and
-conditions of the following licenses.
-
-MIME type information from file-4.26.tar.gz (http://www.darwinsys.com/file/)
-
- Copyright (c) Ian F. Darwin 1986, 1987, 1989, 1990, 1991, 1992, 1994, 1995.
- Software written by Ian F. Darwin and others;
- maintained 1994- Christos Zoulas.
-
- This software is not subject to any export provision of the United States
- Department of Commerce, and may be exported to any country or planet.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
- notice immediately at the beginning of the file, without modification,
- this list of conditions, and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- SUCH DAMAGE.
-
-Charset detection code from ICU4J (http://site.icu-project.org/)
-
- Copyright (c) 1995-2009 International Business Machines Corporation
- and others
-
- All rights reserved.
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, and/or sell copies of the Software, and to permit persons
- to whom the Software is furnished to do so, provided that the above
- copyright notice(s) and this permission notice appear in all copies
- of the Software and that both the above copyright notice(s) and this
- permission notice appear in supporting documentation.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
- IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE
- BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES,
- OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
- WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
- ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- SOFTWARE.
-
- Except as contained in this notice, the name of a copyright holder shall
- not be used in advertising or otherwise to promote the sale, use or other
- dealings in this Software without prior written authorization of the
- copyright holder.
-
-
-Parsing functionality provided by the NetCDF Java Library (http://www.unidata.ucar.edu/software/netcdf-java/)
-
- Copyright 1993-2010 University Corporation for Atmospheric Research/Unidata
-
- Portions of this software were developed by the Unidata Program at the University
- Corporation for Atmospheric Research.
-
- Access and use of this software shall impose the following obligations and understandings
- on the user. The user is granted the right, without any fee or cost, to use, copy, modify,
- alter, enhance and distribute this software, and any derivative works thereof, and its
- supporting documentation for any purpose whatsoever, provided that this entire notice
- appears in all copies of the software, derivative works and supporting documentation. Further,
- UCAR requests that the user credit UCAR/Unidata in any publications that result from the use
- of this software or in any product that includes this software, although this is not an obligation.
- The names UCAR and/or Unidata, however, may not be used in any advertising or publicity to endorse
- or promote any products or commercial entity unless specific written permission is obtained from
- UCAR/Unidata. The user also understands that UCAR/Unidata is not obligated to provide the user with
- any support, consulting, training or assistance of any kind with regard to the use, operation and
- performance of this software nor to provide the user with any updates, revisions, new versions or
- "bug fixes."
-
- THIS SOFTWARE IS PROVIDED BY UCAR/UNIDATA "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
- BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL UCAR/UNIDATA BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
- DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE ACCESS,
- USE OR PERFORMANCE OF THIS SOFTWARE.
-
-
-IPTC Photo Metadata descriptions are taken from the IPTC Photo Metadata
-Standard, July 2010, Copyright 2010 International Press Telecommunications
-Council.
-
- 1. The Specifications and Materials are licensed for use only on the condition that you agree to be bound by the terms of this license. Subject to this and other licensing requirements contained herein, you may, on a non-exclusive basis, use the Specifications and Materials.
- 2. The IPTC openly provides the Specifications and Materials for voluntary use by individuals, partnerships, companies, corporations, organizations and any other entity for use at the entity's own risk. This disclaimer, license and release is intended to apply to the IPTC, its officers, directors, agents, representatives, members, contributors, affiliates, contractors, or co-venturers acting jointly or severally.
- 3. The Document and translations thereof may be copied and furnished to others, and derivative works that comment on or otherwise explain it or assist in its implementation may be prepared, copied, published and distributed, in whole or in part, without restriction of any kind, provided that the copyright and license notices and references to the IPTC appearing in the Document and the terms of this Specifications License Agreement are included on all such copies and derivative works. Further, upon the receipt of written permission from the IPTC, the Document may be modified for the purpose of developing applications that use IPTC Specifications or as required to translate the Document into languages other than English.
- 4. Any use, duplication, distribution, or exploitation of the Document and Specifications and Materials in any manner is at your own risk.
- 5. NO WARRANTY, EXPRESSED OR IMPLIED, IS MADE REGARDING THE ACCURACY, ADEQUACY, COMPLETENESS, LEGALITY, RELIABILITY OR USEFULNESS OF ANY INFORMATION CONTAINED IN THE DOCUMENT OR IN ANY SPECIFICATION OR OTHER PRODUCT OR SERVICE PRODUCED OR SPONSORED BY THE IPTC. THE DOCUMENT AND THE INFORMATION CONTAINED HEREIN AND INCLUDED IN ANY SPECIFICATION OR OTHER PRODUCT OR SERVICE OF THE IPTC IS PROVIDED ON AN "AS IS" BASIS. THE IPTC DISCLAIMS ALL WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY ACTUAL OR ASSERTED WARRANTY OF NON-INFRINGEMENT OF PROPRIETARY RIGHTS, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE IPTC NOR ITS CONTRIBUTORS SHALL BE HELD LIABLE FOR ANY IMPROPER OR INCORRECT USE OF INFORMATION. NEITHER THE IPTC NOR ITS CONTRIBUTORS ASSUME ANY RESPONSIBILITY FOR ANYONE'S USE OF INFORMATION PROVIDED BY THE IPTC. IN NO EVENT SHALL THE IPTC OR ITS CONTRIBUTORS BE LIABLE TO ANYONE FOR DAMAGES OF ANY KIND, INCLUDING BUT NOT LIMITED TO, COMPENSATORY DAMAGES, LOST PROFITS, LOST DATA OR ANY FORM OF SPECIAL, INCIDENTAL, INDIRECT, CONSEQUENTIAL OR PUNITIVE DAMAGES OF ANY KIND WHETHER BASED ON BREACH OF CONTRACT OR WARRANTY, TORT, PRODUCT LIABILITY OR OTHERWISE.
- 6. The IPTC takes no position regarding the validity or scope of any Intellectual Property or other rights that might be claimed to pertain to the implementation or use of the technology described in the Document or the extent to which any license under such rights might or might not be available. The IPTC does not represent that it has made any effort to identify any such rights. Copies of claims of rights made available for publication, assurances of licenses to be made available, or the result of an attempt made to obtain a general license or permission for the use of such proprietary rights by implementers or users of the Specifications and Materials, can be obtained from the Managing Director of the IPTC.
- 7. By using the Specifications and Materials including the Document in any manner or for any purpose, you release the IPTC from all liabilities, claims, causes of action, allegations, losses, injuries, damages, or detriments of any nature arising from or relating to the use of the Specifications, Materials or any portion thereof. You further agree not to file a lawsuit, make a claim, or take any other formal or informal legal action against the IPTC, resulting from your acquisition, use, duplication, distribution, or exploitation of the Specifications, Materials or any portion thereof. Finally, you hereby agree that the IPTC is not liable for any direct, indirect, special or consequential damages arising from or relating to your acquisition, use, duplication, distribution, or exploitation of the Specifications, Materials or any portion thereof.
- 8. Specifications and Materials may be downloaded or copied provided that ALL copies retain the ownership, copyright and license notices.
- 9. Materials may not be edited, modified, or presented in a context that creates a misleading or false impression or statement as to the positions, actions, or statements of the IPTC.
- 10. The name and trademarks of the IPTC may not be used in advertising, publicity, or in relation to products or services and their names without the specific, written prior permission of the IPTC. Any permitted use of the trademarks of the IPTC, whether registered or not, shall be accompanied by an appropriate mark and attribution, as agreed with the IPTC.
- 11. Specifications may be extended by both members and non-members to provide additional functionality (Extension Specifications) provided that there is a clear recognition of the IPTC IP and its ownership in the Extension Specifications and the related documentation and provided that the extensions are clearly identified and provided that a perpetual license is granted by the creator of the Extension Specifications for other members and non-members to use the Extension Specifications and to continue extensions of the Extension Specifications. The IPTC does not waive any of its rights in the Specifications and Materials in this context. The Extension Specifications may be considered the intellectual property of their creator. The IPTC expressly disclaims any responsibility for damage caused by an extension to the Specifications.
- 12. Specifications and Materials may be included in derivative work of both members and non-members provided that there is a clear recognition of the IPTC IP and its ownership in the derivative work and its related documentation. The IPTC does not waive any of its rights in the Specifications and Materials in this context. Derivative work in its entirety may be considered the intellectual property of the creator of the work. The IPTC expressly disclaims any responsibility for damage caused when its IP is used in a derivative context.
- 13. This Specifications License Agreement is perpetual subject to your conformance to the terms of this Agreement. The IPTC may terminate this Specifications License Agreement immediately upon your breach of this Agreement and, upon such termination you will cease all use, duplication, distribution, and/or exploitation in any manner of the Specifications and Materials.
- 14. This Specifications License Agreement reflects the entire agreement of the parties regarding the subject matter hereof and supersedes all prior agreements or representations regarding such matters, whether written or oral. To the extent any portion or provision of this Specifications License Agreement is found to be illegal or unenforceable, then the remaining provisions of this Specifications License Agreement will remain in full force and effect and the illegal or unenforceable provision will be construed to give it such effect as it may properly have that is consistent with the intentions of the parties.
- 15. This Specifications License Agreement may only be modified in writing signed by an authorized representative of the IPTC.
- 16. This Specifications License Agreement is governed by the law of the United Kingdom, as such law is applied to contracts made and fully performed in the United Kingdom. Any disputes arising from or relating to this Specifications License Agreement will be resolved in the courts of the United Kingdom. You consent to the jurisdiction of such courts over you and covenant not to assert before such courts any objection to proceeding in such forums.
-
-
-JUnRAR (https://github.com/edmund-wagner/junrar/)
-
- JUnRAR is based on the UnRAR tool, and covered by the same license.
- It was formerly available from http://java-unrar.svn.sourceforge.net/
-
- ****** ***** ****** UnRAR - free utility for RAR archives
- ** ** ** ** ** ** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ****** ******* ****** License for use and distribution of
- ** ** ** ** ** ** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ** ** ** ** ** ** FREE portable version
- ~~~~~~~~~~~~~~~~~~~~~
-
- The source code of UnRAR utility is freeware. This means:
-
- 1. All copyrights to RAR and the utility UnRAR are exclusively
- owned by the author - Alexander Roshal.
-
- 2. The UnRAR sources may be used in any software to handle RAR
- archives without limitations free of charge, but cannot be used
- to re-create the RAR compression algorithm, which is proprietary.
- Distribution of modified UnRAR sources in separate form or as a
- part of other software is permitted, provided that it is clearly
- stated in the documentation and source comments that the code may
- not be used to develop a RAR (WinRAR) compatible archiver.
-
- 3. The UnRAR utility may be freely distributed. It is allowed
- to distribute UnRAR inside of other software packages.
-
- 4. THE RAR ARCHIVER AND THE UnRAR UTILITY ARE DISTRIBUTED "AS IS".
- NO WARRANTY OF ANY KIND IS EXPRESSED OR IMPLIED. YOU USE AT
- YOUR OWN RISK. THE AUTHOR WILL NOT BE LIABLE FOR DATA LOSS,
- DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING
- OR MISUSING THIS SOFTWARE.
-
- 5. Installing and using the UnRAR utility signifies acceptance of
- these terms and conditions of the license.
-
- 6. If you don't agree with terms of the license you must remove
- UnRAR files from your storage devices and cease to use the
- utility.
-
- Thank you for your interest in RAR and UnRAR. Alexander L. Roshal
-
-Sqlite (bundled in org.xerial's sqlite-jdbc)
- This product bundles Sqlite, which is in the Public Domain. For details
- see: https://www.sqlite.org/copyright.html
diff --git a/plugins/mapper-attachments/licenses/tika-parsers-NOTICE.txt b/plugins/mapper-attachments/licenses/tika-parsers-NOTICE.txt
deleted file mode 100644
index 8e94f644b8..0000000000
--- a/plugins/mapper-attachments/licenses/tika-parsers-NOTICE.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Apache Tika
-Copyright 2015 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Copyright 1993-2010 University Corporation for Atmospheric Research/Unidata
-This software contains code derived from UCAR/Unidata's NetCDF library.
-
-Tika-server component uses CDDL-licensed dependencies: jersey (http://jersey.java.net/) and
-Grizzly (http://grizzly.java.net/)
-
-Tika-parsers component uses CDDL/LGPL dual-licensed dependency: jhighlight (https://github.com/codelibs/jhighlight)
-
-OpenCSV: Copyright 2005 Bytecode Pty Ltd. Licensed under the Apache License, Version 2.0
-
-IPTC Photo Metadata descriptions Copyright 2010 International Press Telecommunications Council.
diff --git a/plugins/mapper-attachments/licenses/xmlbeans-2.6.0.jar.sha1 b/plugins/mapper-attachments/licenses/xmlbeans-2.6.0.jar.sha1
deleted file mode 100644
index d27c56f66c..0000000000
--- a/plugins/mapper-attachments/licenses/xmlbeans-2.6.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-29e80d2dd51f9dcdef8f9ffaee0d4dc1c9bbfc87
diff --git a/plugins/mapper-attachments/licenses/xmlbeans-LICENSE.txt b/plugins/mapper-attachments/licenses/xmlbeans-LICENSE.txt
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/plugins/mapper-attachments/licenses/xmlbeans-LICENSE.txt
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/plugins/mapper-attachments/licenses/xmlbeans-NOTICE.txt b/plugins/mapper-attachments/licenses/xmlbeans-NOTICE.txt
deleted file mode 100644
index 906cc4c968..0000000000
--- a/plugins/mapper-attachments/licenses/xmlbeans-NOTICE.txt
+++ /dev/null
@@ -1,29 +0,0 @@
- =========================================================================
- == NOTICE file corresponding to section 4(d) of the Apache License, ==
- == Version 2.0, in this case for the Apache XmlBeans distribution. ==
- =========================================================================
-
- This product includes software developed by
- The Apache Software Foundation (http://www.apache.org/).
-
- Portions of this software were originally based on the following:
- - software copyright (c) 2000-2003, BEA Systems, <http://www.bea.com/>.
-
- Aside from contributions to the Apache XMLBeans project, this
- software also includes:
-
- - one or more source files from the Apache Xerces-J and Apache Axis
- products, Copyright (c) 1999-2003 Apache Software Foundation
-
- - W3C XML Schema documents Copyright 2001-2003 (c) World Wide Web
- Consortium (Massachusetts Institute of Technology, European Research
- Consortium for Informatics and Mathematics, Keio University)
-
- - resolver.jar from Apache Xml Commons project,
- Copyright (c) 2001-2003 Apache Software Foundation
-
- - Piccolo XML Parser for Java from http://piccolo.sourceforge.net/,
- Copyright 2002 Yuval Oren under the terms of the Apache Software License 2.0
-
- - JSR-173 Streaming API for XML from http://sourceforge.net/projects/xmlpullparser/,
- Copyright 2005 BEA under the terms of the Apache Software License 2.0
diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
deleted file mode 100644
index a7d8228397..0000000000
--- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
+++ /dev/null
@@ -1,656 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.search.Query;
-import org.apache.tika.language.LanguageIdentifier;
-import org.apache.tika.metadata.Metadata;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.collect.Iterators;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.ESLoggerFactory;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Property;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.NumberFieldMapper;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.TextFieldMapper;
-import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType;
-import org.elasticsearch.index.query.QueryShardContext;
-import org.elasticsearch.index.query.QueryShardException;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import static org.elasticsearch.index.mapper.TypeParsers.parseMultiField;
-
-/**
- * <pre>
- * "field1" : "..."
- * </pre>
- * <p>Or:
- * <pre>
- * {
- * "file1" : {
- * "_content_type" : "application/pdf",
- * "_content_length" : "500000000",
- * "_name" : "..../something.pdf",
- * "_content" : ""
- * }
- * }
- * </pre>
- * <p>
- * _content_length = Specify the maximum amount of characters to extract from the attachment. If not specified, then the default for
- * tika is 100,000 characters. Caution is required when setting large values as this can cause memory issues.
- */
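-
-// Hypothetical usage sketch (not taken from the original source): the Javadoc
-// above shows what an indexed document may carry; the mapping that declares a
-// field of this mapper's type would use the "attachment" type name defined by
-// CONTENT_TYPE below. The "cv" field name here is an assumed example:
-//
-//   "properties": {
-//     "cv": { "type": "attachment" }
-//   }
-//
-// Documents indexed into such a field may then supply either the bare
-// base64-encoded content string or the object form shown above.
-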
-public class AttachmentMapper extends FieldMapper {
-
- private static ESLogger logger = ESLoggerFactory.getLogger("mapper.attachment");
- public static final Setting<Boolean> INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING =
- Setting.boolSetting("index.mapping.attachment.ignore_errors", true, Property.IndexScope);
- public static final Setting<Boolean> INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING =
- Setting.boolSetting("index.mapping.attachment.detect_language", false, Property.IndexScope);
- public static final Setting<Integer> INDEX_ATTACHMENT_INDEXED_CHARS_SETTING =
- Setting.intSetting("index.mapping.attachment.indexed_chars", 100000, Property.IndexScope);
-
- public static final String CONTENT_TYPE = "attachment";
-
- public static class Defaults {
-
- public static final AttachmentFieldType FIELD_TYPE = new AttachmentFieldType();
- static {
- FIELD_TYPE.freeze();
- }
- }
-
- public static class FieldNames {
- public static final String CONTENT = "content";
- public static final String TITLE = "title";
- public static final String NAME = "name";
- public static final String AUTHOR = "author";
- public static final String KEYWORDS = "keywords";
- public static final String DATE = "date";
- public static final String CONTENT_TYPE = "content_type";
- public static final String CONTENT_LENGTH = "content_length";
- public static final String LANGUAGE = "language";
- }
-
- static final class AttachmentFieldType extends MappedFieldType {
- public AttachmentFieldType() {}
-
- protected AttachmentFieldType(AttachmentMapper.AttachmentFieldType ref) {
- super(ref);
- }
-
- @Override
- public AttachmentMapper.AttachmentFieldType clone() {
- return new AttachmentMapper.AttachmentFieldType(this);
- }
-
- @Override
- public String typeName() {
- return CONTENT_TYPE;
- }
-
- @Override
- public Query termQuery(Object value, QueryShardContext context) {
- throw new QueryShardException(context, "Attachment fields are not searchable: [" + name() + "]");
- }
- }
-
- public static class Builder extends FieldMapper.Builder<Builder, AttachmentMapper> {
-
- private Boolean ignoreErrors = null;
-
- private Integer defaultIndexedChars = null;
-
- private Boolean langDetect = null;
-
- private Mapper.Builder<?, ?> contentBuilder;
-
- private Mapper.Builder<?, ?> titleBuilder = new TextFieldMapper.Builder(FieldNames.TITLE);
-
- private Mapper.Builder<?, ?> nameBuilder = new TextFieldMapper.Builder(FieldNames.NAME);
-
- private Mapper.Builder<?, ?> authorBuilder = new TextFieldMapper.Builder(FieldNames.AUTHOR);
-
- private Mapper.Builder<?, ?> keywordsBuilder = new TextFieldMapper.Builder(FieldNames.KEYWORDS);
-
- private Mapper.Builder<?, ?> dateBuilder = new DateFieldMapper.Builder(FieldNames.DATE);
-
- private Mapper.Builder<?, ?> contentTypeBuilder = new TextFieldMapper.Builder(FieldNames.CONTENT_TYPE);
-
- private Mapper.Builder<?, ?> contentLengthBuilder = new NumberFieldMapper.Builder(FieldNames.CONTENT_LENGTH, NumberType.INTEGER);
-
- private Mapper.Builder<?, ?> languageBuilder = new TextFieldMapper.Builder(FieldNames.LANGUAGE);
-
- public Builder(String name) {
- super(name, new AttachmentFieldType(), new AttachmentFieldType());
- this.builder = this;
- this.contentBuilder = new TextFieldMapper.Builder(FieldNames.CONTENT);
- }
-
- public Builder content(Mapper.Builder<?, ?> content) {
- this.contentBuilder = content;
- return this;
- }
-
- public Builder date(Mapper.Builder<?, ?> date) {
- this.dateBuilder = date;
- return this;
- }
-
- public Builder author(Mapper.Builder<?, ?> author) {
- this.authorBuilder = author;
- return this;
- }
-
- public Builder title(Mapper.Builder<?, ?> title) {
- this.titleBuilder = title;
- return this;
- }
-
- public Builder name(Mapper.Builder<?, ?> name) {
- this.nameBuilder = name;
- return this;
- }
-
- public Builder keywords(Mapper.Builder<?, ?> keywords) {
- this.keywordsBuilder = keywords;
- return this;
- }
-
- public Builder contentType(Mapper.Builder<?, ?> contentType) {
- this.contentTypeBuilder = contentType;
- return this;
- }
-
- public Builder contentLength(Mapper.Builder<?, ?> contentLength) {
- this.contentLengthBuilder = contentLength;
- return this;
- }
-
- public Builder language(Mapper.Builder<?, ?> language) {
- this.languageBuilder = language;
- return this;
- }
-
- @Override
- public AttachmentMapper build(BuilderContext context) {
-
- FieldMapper contentMapper;
- if (context.indexCreatedVersion().before(Version.V_2_0_0_beta1)) {
- // old behavior, we need the content to be indexed under the attachment field name
- if (contentBuilder instanceof FieldMapper.Builder == false) {
- throw new IllegalStateException("content field for attachment must be a field mapper");
- }
- contentBuilder.name = name + "." + FieldNames.CONTENT;
- contentMapper = (FieldMapper) contentBuilder.build(context);
- context.path().add(name);
- } else {
- context.path().add(name);
- contentMapper = (FieldMapper) contentBuilder.build(context);
- }
-
- FieldMapper dateMapper = (FieldMapper) dateBuilder.build(context);
- FieldMapper authorMapper = (FieldMapper) authorBuilder.build(context);
- FieldMapper titleMapper = (FieldMapper) titleBuilder.build(context);
- FieldMapper nameMapper = (FieldMapper) nameBuilder.build(context);
- FieldMapper keywordsMapper = (FieldMapper) keywordsBuilder.build(context);
- FieldMapper contentTypeMapper = (FieldMapper) contentTypeBuilder.build(context);
- FieldMapper contentLength = (FieldMapper) contentLengthBuilder.build(context);
- FieldMapper language = (FieldMapper) languageBuilder.build(context);
- context.path().remove();
-
- if (defaultIndexedChars == null && context.indexSettings() != null) {
- defaultIndexedChars = INDEX_ATTACHMENT_INDEXED_CHARS_SETTING.get(context.indexSettings());
- }
- if (defaultIndexedChars == null) {
- defaultIndexedChars = 100000;
- }
-
- if (ignoreErrors == null && context.indexSettings() != null) {
- ignoreErrors = INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING.get(context.indexSettings());
- }
- if (ignoreErrors == null) {
- ignoreErrors = Boolean.TRUE;
- }
-
- if (langDetect == null && context.indexSettings() != null) {
- langDetect = INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING.get(context.indexSettings());
- }
- if (langDetect == null) {
- langDetect = Boolean.FALSE;
- }
- MappedFieldType defaultFieldType = Defaults.FIELD_TYPE.clone();
- if (this.fieldType.indexOptions() != IndexOptions.NONE && !this.fieldType.tokenized()) {
- defaultFieldType.setOmitNorms(true);
- defaultFieldType.setIndexOptions(IndexOptions.DOCS);
- if (!this.omitNormsSet && this.fieldType.boost() == 1.0F) {
- this.fieldType.setOmitNorms(true);
- }
-
- if (!this.indexOptionsSet) {
- this.fieldType.setIndexOptions(IndexOptions.DOCS);
- }
- }
-
- defaultFieldType.freeze();
- this.setupFieldType(context);
- return new AttachmentMapper(name, fieldType, defaultFieldType, defaultIndexedChars, ignoreErrors, langDetect, contentMapper,
- dateMapper, titleMapper, nameMapper, authorMapper, keywordsMapper, contentTypeMapper, contentLength,
- language, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- }
- }
-
- /**
- * <pre>
- *     field1 : { type : "attachment" }
- * </pre>
- * Or:
- * <pre>
- *     field1 : {
- *         type : "attachment",
- *         fields : {
- *             content : {type : "binary"},
- *             title : {store : "yes"},
- *             date : {store : "yes"},
- *             name : {store : "yes"},
- *             author : {store : "yes"},
- *             keywords : {store : "yes"},
- *             content_type : {store : "yes"},
- *             content_length : {store : "yes"}
- *         }
- *     }
- * </pre>
- */
- public static class TypeParser implements Mapper.TypeParser {
-
- private Mapper.Builder<?, ?> findMapperBuilder(Map<String, Object> propNode, String propName, ParserContext parserContext) {
- String type;
- Object typeNode = propNode.get("type");
- if (typeNode != null) {
- type = typeNode.toString();
- } else {
- type = "text";
- }
- Mapper.TypeParser typeParser = parserContext.typeParser(type);
- Mapper.Builder<?, ?> mapperBuilder = typeParser.parse(propName, propNode, parserContext);
-
- return mapperBuilder;
- }
-
- @Override
- @SuppressWarnings("unchecked") // Safe because we know how our maps are shaped
- public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
- AttachmentMapper.Builder builder = new AttachmentMapper.Builder(name);
-
- for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
- Map.Entry<String, Object> entry = iterator.next();
- String fieldName = entry.getKey();
- Object fieldNode = entry.getValue();
- if (fieldName.equals("fields")) {
- Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode;
- for (Iterator<Map.Entry<String, Object>> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) {
- Map.Entry<String, Object> entry1 = fieldsIterator.next();
- String propName = entry1.getKey();
- Map<String, Object> propNode = (Map<String, Object>) entry1.getValue();
-
- Mapper.Builder<?, ?> mapperBuilder = findMapperBuilder(propNode, propName, parserContext);
- if (parseMultiField((FieldMapper.Builder<?, ?>) mapperBuilder, fieldName, parserContext, propName, propNode)) {
- fieldsIterator.remove();
- } else if (propName.equals(name) && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
- builder.content(mapperBuilder);
- fieldsIterator.remove();
- } else {
- switch (propName) {
- case FieldNames.CONTENT:
- builder.content(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.DATE:
- builder.date(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.AUTHOR:
- builder.author(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.CONTENT_LENGTH:
- builder.contentLength(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.CONTENT_TYPE:
- builder.contentType(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.KEYWORDS:
- builder.keywords(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.LANGUAGE:
- builder.language(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.TITLE:
- builder.title(mapperBuilder);
- fieldsIterator.remove();
- break;
- case FieldNames.NAME:
- builder.name(mapperBuilder);
- fieldsIterator.remove();
- break;
- }
- }
- }
- DocumentMapperParser.checkNoRemainingFields(fieldName, fieldsNode, parserContext.indexVersionCreated());
- iterator.remove();
- }
- }
-
- return builder;
- }
- }
-
- private final int defaultIndexedChars;
-
- private final boolean ignoreErrors;
-
- private final boolean defaultLangDetect;
-
- private final FieldMapper contentMapper;
-
- private final FieldMapper dateMapper;
-
- private final FieldMapper authorMapper;
-
- private final FieldMapper titleMapper;
-
- private final FieldMapper nameMapper;
-
- private final FieldMapper keywordsMapper;
-
- private final FieldMapper contentTypeMapper;
-
- private final FieldMapper contentLengthMapper;
-
- private final FieldMapper languageMapper;
-
- public AttachmentMapper(String simpleName, MappedFieldType type, MappedFieldType defaultFieldType, int defaultIndexedChars, Boolean ignoreErrors,
- Boolean defaultLangDetect, FieldMapper contentMapper,
- FieldMapper dateMapper, FieldMapper titleMapper, FieldMapper nameMapper, FieldMapper authorMapper,
- FieldMapper keywordsMapper, FieldMapper contentTypeMapper, FieldMapper contentLengthMapper,
- FieldMapper languageMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
- super(simpleName, type, defaultFieldType, indexSettings, multiFields, copyTo);
- this.defaultIndexedChars = defaultIndexedChars;
- this.ignoreErrors = ignoreErrors;
- this.defaultLangDetect = defaultLangDetect;
- this.contentMapper = contentMapper;
- this.dateMapper = dateMapper;
- this.titleMapper = titleMapper;
- this.nameMapper = nameMapper;
- this.authorMapper = authorMapper;
- this.keywordsMapper = keywordsMapper;
- this.contentTypeMapper = contentTypeMapper;
- this.contentLengthMapper = contentLengthMapper;
- this.languageMapper = languageMapper;
- }
-
- @Override
- public Mapper parse(ParseContext context) throws IOException {
- byte[] content = null;
- String contentType = null;
- int indexedChars = defaultIndexedChars;
- boolean langDetect = defaultLangDetect;
- String name = null;
- String language = null;
-
- XContentParser parser = context.parser();
- XContentParser.Token token = parser.currentToken();
- if (token == XContentParser.Token.VALUE_STRING) {
- content = parser.binaryValue();
- } else {
- String currentFieldName = null;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- currentFieldName = parser.currentName();
- } else if (token == XContentParser.Token.VALUE_STRING) {
- if ("_content".equals(currentFieldName)) {
- content = parser.binaryValue();
- } else if ("_content_type".equals(currentFieldName)) {
- contentType = parser.text();
- } else if ("_name".equals(currentFieldName)) {
- name = parser.text();
- } else if ("_language".equals(currentFieldName)) {
- language = parser.text();
- }
- } else if (token == XContentParser.Token.VALUE_NUMBER) {
- if ("_indexed_chars".equals(currentFieldName) || "_indexedChars".equals(currentFieldName)) {
- indexedChars = parser.intValue();
- }
- } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
- if ("_detect_language".equals(currentFieldName) || "_detectLanguage".equals(currentFieldName)) {
- langDetect = parser.booleanValue();
- }
- }
- }
- }
-
- // Throw a clean exception when no content is provided (fix #23)
- if (content == null) {
- throw new MapperParsingException("No content is provided.");
- }
-
- Metadata metadata = new Metadata();
- if (contentType != null) {
- metadata.add(Metadata.CONTENT_TYPE, contentType);
- }
- if (name != null) {
- metadata.add(Metadata.RESOURCE_NAME_KEY, name);
- }
-
- String parsedContent;
- try {
- parsedContent = TikaImpl.parse(content, metadata, indexedChars);
- } catch (Exception e) {
- // #18: optionally ignore errors when Tika fails to parse the data
- if (!ignoreErrors) {
- logger.trace("exception caught", e);
- throw new MapperParsingException("Failed to extract [" + indexedChars + "] characters of text for [" + name + "] : "
- + e.getMessage(), e);
- } else {
- logger.debug("Failed to extract [{}] characters of text for [{}]: [{}]", indexedChars, name, e.getMessage());
- logger.trace("exception caught", e);
- }
- return null;
- }
-
- context = context.createExternalValueContext(parsedContent);
- contentMapper.parse(context);
-
- if (langDetect) {
- try {
- if (language != null) {
- metadata.add(Metadata.CONTENT_LANGUAGE, language);
- } else {
- LanguageIdentifier identifier = new LanguageIdentifier(parsedContent);
- language = identifier.getLanguage();
- }
- context = context.createExternalValueContext(language);
- languageMapper.parse(context);
- } catch(Exception e) {
- logger.debug("Cannot detect language: [{}]", e.getMessage());
- }
- }
-
- if (name != null) {
- try {
- context = context.createExternalValueContext(name);
- nameMapper.parse(context);
- } catch(MapperParsingException e){
- if (!ignoreErrors) throw e;
- if (logger.isDebugEnabled()) logger.debug("Ignoring MapperParsingException caught while parsing name: [{}]",
- e.getMessage());
- }
- }
-
- if (metadata.get(Metadata.DATE) != null) {
- try {
- context = context.createExternalValueContext(metadata.get(Metadata.DATE));
- dateMapper.parse(context);
- } catch(MapperParsingException e){
- if (!ignoreErrors) throw e;
- if (logger.isDebugEnabled()) logger.debug("Ignoring MapperParsingException caught while parsing date: [{}]: [{}]",
- e.getMessage(), context.externalValue());
- }
- }
-
- if (metadata.get(Metadata.TITLE) != null) {
- try {
- context = context.createExternalValueContext(metadata.get(Metadata.TITLE));
- titleMapper.parse(context);
- } catch(MapperParsingException e){
- if (!ignoreErrors) throw e;
- if (logger.isDebugEnabled()) logger.debug("Ignoring MapperParsingException caught while parsing title: [{}]: [{}]",
- e.getMessage(), context.externalValue());
- }
- }
-
- if (metadata.get(Metadata.AUTHOR) != null) {
- try {
- context = context.createExternalValueContext(metadata.get(Metadata.AUTHOR));
- authorMapper.parse(context);
- } catch(MapperParsingException e){
- if (!ignoreErrors) throw e;
- if (logger.isDebugEnabled()) logger.debug("Ignoring MapperParsingException caught while parsing author: [{}]: [{}]",
- e.getMessage(), context.externalValue());
- }
- }
-
- if (metadata.get(Metadata.KEYWORDS) != null) {
- try {
- context = context.createExternalValueContext(metadata.get(Metadata.KEYWORDS));
- keywordsMapper.parse(context);
- } catch(MapperParsingException e){
- if (!ignoreErrors) throw e;
- if (logger.isDebugEnabled()) logger.debug("Ignoring MapperParsingException caught while parsing keywords: [{}]: [{}]",
- e.getMessage(), context.externalValue());
- }
- }
-
- if (contentType == null) {
- contentType = metadata.get(Metadata.CONTENT_TYPE);
- }
- if (contentType != null) {
- try {
- context = context.createExternalValueContext(contentType);
- contentTypeMapper.parse(context);
- } catch(MapperParsingException e){
- if (!ignoreErrors) throw e;
- if (logger.isDebugEnabled()) logger.debug("Ignoring MapperParsingException caught while parsing content_type: [{}]: [{}]", e.getMessage(), context.externalValue());
- }
- }
-
- int length = content.length;
- // If we have CONTENT_LENGTH from Tika, we use it
- if (metadata.get(Metadata.CONTENT_LENGTH) != null) {
- length = Integer.parseInt(metadata.get(Metadata.CONTENT_LENGTH));
- }
-
- try {
- context = context.createExternalValueContext(length);
- contentLengthMapper.parse(context);
- } catch(MapperParsingException e){
- if (!ignoreErrors) throw e;
- if (logger.isDebugEnabled()) logger.debug("Ignoring MapperParsingException caught while parsing content_length: [{}]: [{}]", e.getMessage(), context.externalValue());
- }
-
-// multiFields.parse(this, context);
-
- return null;
- }
-
- @Override
- protected void parseCreateField(ParseContext parseContext, List<Field> fields) throws IOException {
-
- }
-
- @Override
- protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
- // ignore this for now
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public Iterator<Mapper> iterator() {
- List<Mapper> extras = Arrays.asList(
- contentMapper,
- dateMapper,
- titleMapper,
- nameMapper,
- authorMapper,
- keywordsMapper,
- contentTypeMapper,
- contentLengthMapper,
- languageMapper);
- return Iterators.concat(super.iterator(), extras.iterator());
- }
-
- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject(simpleName());
- builder.field("type", CONTENT_TYPE);
-
- builder.startObject("fields");
- contentMapper.toXContent(builder, params);
- authorMapper.toXContent(builder, params);
- titleMapper.toXContent(builder, params);
- nameMapper.toXContent(builder, params);
- dateMapper.toXContent(builder, params);
- keywordsMapper.toXContent(builder, params);
- contentTypeMapper.toXContent(builder, params);
- contentLengthMapper.toXContent(builder, params);
- languageMapper.toXContent(builder, params);
- builder.endObject();
-
- multiFields.toXContent(builder, params);
- builder.endObject();
- return builder;
- }
-
- @Override
- protected String contentType() {
- return CONTENT_TYPE;
- }
-}
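
For reference while reading the removed parse() method above: the per-document keys it understood were _content (required, base64), _content_type, _name, _indexed_chars/_indexedChars and _detect_language/_detectLanguage. A minimal sketch of a source document exercising them, assuming an attachment field mapped under "file" (the field name and payload are illustrative):

    import java.nio.charset.StandardCharsets;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // Sketch: a source document driving AttachmentMapper.parse().
    byte[] bytes = "Hello attachment".getBytes(StandardCharsets.UTF_8); // stand-in payload; XContentBuilder base64-encodes byte[]
    XContentBuilder doc = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("file")
                .field("_content", bytes)                  // the attachment itself; required
                .field("_content_type", "application/pdf") // optional hint fed to Tika metadata
                .field("_name", "something.pdf")           // optional resource name
                .field("_indexed_chars", 20000)            // per-document override of indexed_chars
                .field("_detect_language", true)           // per-document override of detect_language
            .endObject()
        .endObject();
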
diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java
deleted file mode 100644
index 6cf957f05c..0000000000
--- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/MapperAttachmentsPlugin.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.ESLoggerFactory;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.plugins.MapperPlugin;
-import org.elasticsearch.plugins.Plugin;
-
-public class MapperAttachmentsPlugin extends Plugin implements MapperPlugin {
-
- private static ESLogger logger = ESLoggerFactory.getLogger("mapper.attachment");
- private static DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
-
- @Override
- public List<Setting<?>> getSettings() {
- deprecationLogger.deprecated("[mapper-attachments] plugin has been deprecated and will be replaced by [ingest-attachment] plugin.");
-
- return Arrays.asList(AttachmentMapper.INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING,
- AttachmentMapper.INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING,
- AttachmentMapper.INDEX_ATTACHMENT_INDEXED_CHARS_SETTING);
- }
-
- @Override
- public Map<String, Mapper.TypeParser> getMappers() {
- return Collections.singletonMap("attachment", new AttachmentMapper.TypeParser());
- }
-}
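
The three index-scoped settings registered here are the same knobs the unit tests below toggle through MapperTestUtils. A minimal sketch of overriding them at the index-settings level, using the setting keys defined in AttachmentMapper above:

    import org.elasticsearch.common.settings.Settings;

    // Sketch: overriding the mapper-attachments index settings.
    Settings indexSettings = Settings.builder()
        .put("index.mapping.attachment.ignore_errors", false)  // fail hard instead of skipping unparsable docs
        .put("index.mapping.attachment.detect_language", true) // run Tika language detection by default
        .put("index.mapping.attachment.indexed_chars", 20000)  // extraction limit; the default is 100000
        .build();
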
diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java
deleted file mode 100644
index 2babda8ad0..0000000000
--- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java
+++ /dev/null
@@ -1,161 +0,0 @@
-package org.elasticsearch.mapper.attachments;
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.tika.Tika;
-import org.apache.tika.exception.TikaException;
-import org.apache.tika.metadata.Metadata;
-import org.apache.tika.parser.AutoDetectParser;
-import org.apache.tika.parser.Parser;
-import org.elasticsearch.SpecialPermission;
-import org.elasticsearch.bootstrap.JarHell;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.io.PathUtils;
-
-import java.io.ByteArrayInputStream;
-import java.io.FilePermission;
-import java.io.IOException;
-import java.lang.reflect.ReflectPermission;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.nio.file.Path;
-import java.security.AccessControlContext;
-import java.security.AccessController;
-import java.security.PermissionCollection;
-import java.security.Permissions;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.security.ProtectionDomain;
-import java.security.SecurityPermission;
-import java.util.PropertyPermission;
-
-/**
- * Runs tika with limited parsers and limited permissions.
- * <p>
- * Do NOT make public
- */
-final class TikaImpl {
-
- /** subset of parsers for types we support */
- private static final Parser PARSERS[] = new Parser[] {
- // documents
- new org.apache.tika.parser.html.HtmlParser(),
- new org.apache.tika.parser.rtf.RTFParser(),
- new org.apache.tika.parser.pdf.PDFParser(),
- new org.apache.tika.parser.txt.TXTParser(),
- new org.apache.tika.parser.microsoft.OfficeParser(),
- new org.apache.tika.parser.microsoft.OldExcelParser(),
- new org.apache.tika.parser.microsoft.ooxml.OOXMLParser(),
- new org.apache.tika.parser.odf.OpenDocumentParser(),
- new org.apache.tika.parser.iwork.IWorkPackageParser(),
- new org.apache.tika.parser.xml.DcXMLParser(),
- new org.apache.tika.parser.epub.EpubParser(),
- };
-
- /** autodetector based on this subset */
- private static final AutoDetectParser PARSER_INSTANCE = new AutoDetectParser(PARSERS);
-
- /** singleton tika instance */
- private static final Tika TIKA_INSTANCE = new Tika(PARSER_INSTANCE.getDetector(), PARSER_INSTANCE);
-
- /**
- * parses with tika, throwing any exception hit while parsing the document
- */
- // only package private for testing!
- static String parse(final byte content[], final Metadata metadata, final int limit) throws TikaException, IOException {
- // check that it's not unprivileged code like a script
- SecurityManager sm = System.getSecurityManager();
- if (sm != null) {
- sm.checkPermission(new SpecialPermission());
- }
-
- try {
- return AccessController.doPrivileged(new PrivilegedExceptionAction<String>() {
- @Override
- public String run() throws TikaException, IOException {
- return TIKA_INSTANCE.parseToString(new ByteArrayInputStream(content), metadata, limit);
- }
- }, RESTRICTED_CONTEXT);
- } catch (PrivilegedActionException e) {
- // checked exception from tika: unbox it
- Throwable cause = e.getCause();
- if (cause instanceof TikaException) {
- throw (TikaException) cause;
- } else if (cause instanceof IOException) {
- throw (IOException) cause;
- } else {
- throw new AssertionError(cause);
- }
- }
- }
-
- // apply additional containment for parsers; this is intersected with the current permissions
- // it's hairy, but worth it so an XML parser flaw can't read arbitrary files from the FS
- private static final AccessControlContext RESTRICTED_CONTEXT = new AccessControlContext(
- new ProtectionDomain[] {
- new ProtectionDomain(null, getRestrictedPermissions())
- }
- );
-
- // compute minimal permissions for parsers: they only get r/w access to the java temp directory,
- // the ability to load some resources from JARs, and read access to sysprops
- static PermissionCollection getRestrictedPermissions() {
- Permissions perms = new Permissions();
- // property/env access needed for parsing
- perms.add(new PropertyPermission("*", "read"));
- perms.add(new RuntimePermission("getenv.TIKA_CONFIG"));
-
- // add permissions for resource access:
- // classpath
- addReadPermissions(perms, JarHell.parseClassPath());
- // plugin jars
- if (TikaImpl.class.getClassLoader() instanceof URLClassLoader) {
- addReadPermissions(perms, ((URLClassLoader)TikaImpl.class.getClassLoader()).getURLs());
- }
- // jvm's java.io.tmpdir (needs read/write)
- perms.add(new FilePermission(System.getProperty("java.io.tmpdir") + System.getProperty("file.separator") + "-",
- "read,readlink,write,delete"));
- // current hacks needed for POI/PDFbox issues:
- perms.add(new SecurityPermission("putProviderProperty.BC"));
- perms.add(new SecurityPermission("insertProvider"));
- perms.add(new ReflectPermission("suppressAccessChecks"));
- // xmlbeans, use by POI, needs to get the context classloader
- perms.add(new RuntimePermission("getClassLoader"));
- perms.setReadOnly();
- return perms;
- }
-
- // add read permissions for resources, which typically live in a jar but might not (e.g. in tests/IDE)
- @SuppressForbidden(reason = "adds access to jar resources")
- static void addReadPermissions(Permissions perms, URL resources[]) {
- try {
- for (URL url : resources) {
- Path path = PathUtils.get(url.toURI());
- // resource itself
- perms.add(new FilePermission(path.toString(), "read,readlink"));
- // classes underneath
- perms.add(new FilePermission(path.toString() + System.getProperty("file.separator") + "-", "read,readlink"));
- }
- } catch (URISyntaxException bogus) {
- throw new RuntimeException(bogus);
- }
- }
-}
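
The containment above is plain java.security machinery: passing an explicit AccessControlContext to AccessController.doPrivileged intersects the caller's permissions with the restricted set, so even an exploited parser cannot do more than the restricted permissions allow. A minimal self-contained sketch of the same pattern (the property read stands in for the Tika work):

    import java.security.AccessControlContext;
    import java.security.AccessController;
    import java.security.Permissions;
    import java.security.PrivilegedAction;
    import java.security.ProtectionDomain;
    import java.util.PropertyPermission;

    // Sketch: run a block of code against an intersected, reduced permission set.
    Permissions perms = new Permissions();
    perms.add(new PropertyPermission("java.io.tmpdir", "read")); // the only grant in this sketch
    perms.setReadOnly();
    AccessControlContext restricted = new AccessControlContext(
        new ProtectionDomain[] { new ProtectionDomain(null, perms) });
    String tmp = AccessController.doPrivileged(
        (PrivilegedAction<String>) () -> System.getProperty("java.io.tmpdir"), restricted);
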
diff --git a/plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy b/plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy
deleted file mode 100644
index adf76991b5..0000000000
--- a/plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-// NOTE: when modifying this file, look at restrictions in TikaImpl too
-grant {
- // needed to apply additional sandboxing to tika parsing
- permission java.security.SecurityPermission "createAccessControlContext";
-
- // TODO: fix PDFBox not to actually install bouncy castle like this
- permission java.security.SecurityPermission "putProviderProperty.BC";
- permission java.security.SecurityPermission "insertProvider";
- // TODO: fix POI XWPF to not do this: https://bz.apache.org/bugzilla/show_bug.cgi?id=58597
- permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
- // needed by xmlbeans, as part of POI for MS xml docs
- permission java.lang.RuntimePermission "getClassLoader";
-};
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/AttachmentUnitTestCase.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/AttachmentUnitTestCase.java
deleted file mode 100644
index 29dfff66d9..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/AttachmentUnitTestCase.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Map;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.indices.IndicesModule;
-import org.elasticsearch.plugins.MapperPlugin;
-import org.elasticsearch.test.ESTestCase;
-import org.junit.Before;
-
-public abstract class AttachmentUnitTestCase extends ESTestCase {
-
- protected Settings testSettings;
-
- protected static IndicesModule getIndicesModuleWithRegisteredAttachmentMapper() {
- return newTestIndicesModule(
- Collections.singletonMap(AttachmentMapper.CONTENT_TYPE, new AttachmentMapper.TypeParser()),
- Collections.emptyMap()
- );
- }
-
- @Before
- public void createSettings() throws Exception {
- testSettings = Settings.builder()
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
- .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id)
- .build();
- }
-}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/DateAttachmentMapperTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/DateAttachmentMapperTests.java
deleted file mode 100644
index 635c9bee85..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/DateAttachmentMapperTests.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.TextFieldMapper;
-import org.junit.Before;
-
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.hamcrest.Matchers.instanceOf;
-
-/**
- *
- */
-public class DateAttachmentMapperTests extends AttachmentUnitTestCase {
-
- private DocumentMapperParser mapperParser;
-
- @Before
- public void setupMapperParser() throws Exception {
- mapperParser = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
- }
-
- public void testSimpleMappings() throws Exception {
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/date/date-mapping.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
-
- // The date field should be mapped as text, not as a date
- assertThat(docMapper.mappers().getMapper("file.date"), instanceOf(TextFieldMapper.class));
- }
-}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/EncryptedDocMapperTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/EncryptedDocMapperTests.java
deleted file mode 100644
index a10910cf62..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/EncryptedDocMapperTests.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.ParseContext;
-
-import java.io.IOException;
-
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThan;
-import static org.hamcrest.Matchers.nullValue;
-import static org.hamcrest.Matchers.startsWith;
-
-/**
- * Test for https://github.com/elastic/elasticsearch-mapper-attachments/issues/18
- * Note that we have converted /org/elasticsearch/index/mapper/xcontent/testContentLength.txt
- * to a /org/elasticsearch/index/mapper/xcontent/encrypted.pdf with password `12345678`.
- */
-public class EncryptedDocMapperTests extends AttachmentUnitTestCase {
-
- public void testMultipleDocsEncryptedLast() throws IOException {
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
-
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/encrypted/test-mapping.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithValidDateMeta.html");
- byte[] pdf = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/encrypted.pdf");
-
- BytesReference json = jsonBuilder()
- .startObject()
- .field("file1", html)
- .field("file2", pdf)
- .endObject().bytes();
-
- ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc();
- assertThat(doc.get(docMapper.mappers().getMapper("file1.content").fieldType().name()), containsString("World"));
- assertThat(doc.get(docMapper.mappers().getMapper("file1.title").fieldType().name()), equalTo("Hello"));
- assertThat(doc.get(docMapper.mappers().getMapper("file1.author").fieldType().name()), equalTo("kimchy"));
- assertThat(doc.get(docMapper.mappers().getMapper("file1.keywords").fieldType().name()), equalTo("elasticsearch,cool,bonsai"));
- assertThat(doc.get(docMapper.mappers().getMapper("file1.content_type").fieldType().name()), startsWith("text/html;"));
- assertThat(doc.getField(docMapper.mappers().getMapper("file1.content_length").fieldType().name()).numericValue().longValue(), greaterThan(0L));
-
- assertThat(doc.get(docMapper.mappers().getMapper("file2").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file2.title").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file2.author").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file2.keywords").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file2.content_type").fieldType().name()), nullValue());
- assertThat(doc.getField(docMapper.mappers().getMapper("file2.content_length").fieldType().name()), nullValue());
- }
-
- public void testMultipleDocsEncryptedFirst() throws IOException {
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/encrypted/test-mapping.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithValidDateMeta.html");
- byte[] pdf = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/encrypted.pdf");
-
- BytesReference json = jsonBuilder()
- .startObject()
- .field("file1", pdf)
- .field("file2", html)
- .endObject().bytes();
-
- ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc();
- assertThat(doc.get(docMapper.mappers().getMapper("file1").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file1.title").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file1.author").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file1.keywords").fieldType().name()), nullValue());
- assertThat(doc.get(docMapper.mappers().getMapper("file1.content_type").fieldType().name()), nullValue());
- assertThat(doc.getField(docMapper.mappers().getMapper("file1.content_length").fieldType().name()), nullValue());
-
- assertThat(doc.get(docMapper.mappers().getMapper("file2.content").fieldType().name()), containsString("World"));
- assertThat(doc.get(docMapper.mappers().getMapper("file2.title").fieldType().name()), equalTo("Hello"));
- assertThat(doc.get(docMapper.mappers().getMapper("file2.author").fieldType().name()), equalTo("kimchy"));
- assertThat(doc.get(docMapper.mappers().getMapper("file2.keywords").fieldType().name()), equalTo("elasticsearch,cool,bonsai"));
- assertThat(doc.get(docMapper.mappers().getMapper("file2.content_type").fieldType().name()), startsWith("text/html;"));
- assertThat(doc.getField(docMapper.mappers().getMapper("file2.content_length").fieldType().name()).numericValue().longValue(), greaterThan(0L));
- }
-
- public void testMultipleDocsEncryptedNotIgnoringErrors() throws IOException {
- try {
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(),
- Settings.builder().put("index.mapping.attachment.ignore_errors", false).build(),
- getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
-
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/encrypted/test-mapping.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithValidDateMeta.html");
- byte[] pdf = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/encrypted.pdf");
-
- BytesReference json = jsonBuilder()
- .startObject()
- .field("file1", pdf)
- .field("file2", html)
- .endObject().bytes();
-
- docMapper.parse("person", "person", "1", json);
- fail("Expected doc parsing exception");
- } catch (MapperParsingException e) {
- if (e.getMessage() == null || e.getMessage().contains("is encrypted") == false) {
- // wrong exception
- throw e;
- }
- }
- }
-
-}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/LanguageDetectionAttachmentMapperTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/LanguageDetectionAttachmentMapperTests.java
deleted file mode 100644
index 8c09c78740..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/LanguageDetectionAttachmentMapperTests.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.TextFieldMapper;
-import org.junit.Before;
-
-import java.io.IOException;
-
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
-
-/**
- *
- */
-public class LanguageDetectionAttachmentMapperTests extends AttachmentUnitTestCase {
-
- private DocumentMapper docMapper;
-
- @Before
- public void setupMapperParser() throws IOException {
- setupMapperParser(true);
- }
-
- public void setupMapperParser(boolean langDetect) throws IOException {
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(),
- Settings.builder().put("index.mapping.attachment.detect_language", langDetect).build(),
- getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/language/language-mapping.json");
- docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
-
- assertThat(docMapper.mappers().getMapper("file.language"), instanceOf(TextFieldMapper.class));
- }
-
- private void testLanguage(String filename, String expected, String... forcedLanguage) throws IOException {
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/" + filename);
-
- XContentBuilder xcb = jsonBuilder()
- .startObject()
- .startObject("file")
- .field("_name", filename)
- .field("_content", html);
-
- if (forcedLanguage.length > 0) {
- xcb.field("_language", forcedLanguage[0]);
- }
-
- xcb.endObject().endObject();
-
- ParseContext.Document doc = docMapper.parse("person", "person", "1", xcb.bytes()).rootDoc();
-
- // The detected (or forced) language should be stored in file.language
- assertThat(doc.get(docMapper.mappers().getMapper("file.language").fieldType().name()), equalTo(expected));
- }
-
- public void testFrDetection() throws Exception {
- testLanguage("text-in-french.txt", "fr");
- }
-
- public void testEnDetection() throws Exception {
- testLanguage("text-in-english.txt", "en");
- }
-
- public void testFrForced() throws Exception {
- testLanguage("text-in-english.txt", "fr", "fr");
- }
-
- /**
- * This test gives strange results: language detection of ":-)" yields "lt".
- */
- public void testNoLanguage() throws Exception {
- testLanguage("text-in-nolang.txt", "lt");
- }
-
- public void testLangDetectDisabled() throws Exception {
- // We replace the mapper with another one which has index.mapping.attachment.detect_language = false
- setupMapperParser(false);
- testLanguage("text-in-english.txt", null);
- }
-
- public void testLangDetectDocumentEnabled() throws Exception {
- // We replace the mapper with one which has index.mapping.attachment.detect_language = false, then enable detection per document
- setupMapperParser(false);
-
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-english.txt");
-
- XContentBuilder xcb = jsonBuilder()
- .startObject()
- .startObject("file")
- .field("_name", "text-in-english.txt")
- .field("_content", html)
- .field("_detect_language", true)
- .endObject().endObject();
-
- ParseContext.Document doc = docMapper.parse("person", "person", "1", xcb.bytes()).rootDoc();
-
- // The per-document _detect_language flag should override the index setting
- assertThat(doc.get(docMapper.mappers().getMapper("file.language").fieldType().name()), equalTo("en"));
- }
-}
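
The detection itself is a single Tika call, which also explains the ":-)" oddity noted above: on inputs that short the n-gram language profiles have almost nothing to match, so the result is noise. A minimal sketch using the same org.apache.tika.language API the mapper imports (the sample sentence is illustrative):

    import org.apache.tika.language.LanguageIdentifier;

    // Sketch: the call behind _detect_language in AttachmentMapper.parse().
    LanguageIdentifier identifier = new LanguageIdentifier("Ceci est un texte écrit en français.");
    String lang = identifier.getLanguage();             // "fr" for a sample like this
    boolean certain = identifier.isReasonablyCertain(); // false for tiny inputs such as ":-)"
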
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MetadataMapperTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MetadataMapperTests.java
deleted file mode 100644
index cb8f6746a8..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MetadataMapperTests.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.ParseContext;
-
-import java.io.IOException;
-
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThan;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.nullValue;
-import static org.hamcrest.Matchers.startsWith;
-
-/**
- * Test for https://github.com/elastic/elasticsearch-mapper-attachments/issues/38
- */
-public class MetadataMapperTests extends AttachmentUnitTestCase {
-
- protected void checkMeta(String filename, Settings otherSettings, Long expectedDate, Long expectedLength) throws IOException {
- Settings settings = Settings.builder()
- .put(this.testSettings)
- .put(otherSettings)
- .build();
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(), settings, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
-
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/metadata/test-mapping.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/" + filename);
-
- BytesReference json = jsonBuilder()
- .startObject()
- .startObject("file")
- .field("_name", filename)
- .field("_content", html)
- .endObject()
- .endObject().bytes();
-
- ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc();
- assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().name()), containsString("World"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.name").fieldType().name()), equalTo(filename));
- if (expectedDate == null) {
- assertThat(doc.getField(docMapper.mappers().getMapper("file.date").fieldType().name()), nullValue());
- } else {
- assertThat(doc.getField(docMapper.mappers().getMapper("file.date").fieldType().name()).numericValue().longValue(), is(expectedDate));
- }
- assertThat(doc.get(docMapper.mappers().getMapper("file.title").fieldType().name()), equalTo("Hello"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.author").fieldType().name()), equalTo("kimchy"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.keywords").fieldType().name()), equalTo("elasticsearch,cool,bonsai"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.content_type").fieldType().name()), startsWith("text/html;"));
- if (expectedLength == null) {
- assertNull(doc.getField(docMapper.mappers().getMapper("file.content_length").fieldType().name()));
- } else {
- assertThat(doc.getField(docMapper.mappers().getMapper("file.content_length").fieldType().name()).numericValue().longValue(), greaterThan(0L));
- }
- }
-
- public void testIgnoreWithoutDate() throws Exception {
- checkMeta("htmlWithoutDateMeta.html", Settings.builder().build(), null, 300L);
- }
-
- public void testIgnoreWithEmptyDate() throws Exception {
- checkMeta("htmlWithEmptyDateMeta.html", Settings.builder().build(), null, 334L);
- }
-
- public void testIgnoreWithCorrectDate() throws Exception {
- checkMeta("htmlWithValidDateMeta.html", Settings.builder().build(), 1354233600000L, 344L);
- }
-
- public void testWithoutDate() throws Exception {
- checkMeta("htmlWithoutDateMeta.html", Settings.builder().put("index.mapping.attachment.ignore_errors", false).build(), null, 300L);
- }
-
- public void testWithEmptyDate() throws Exception {
- try {
- checkMeta("htmlWithEmptyDateMeta.html", Settings.builder().put("index.mapping.attachment.ignore_errors", false).build(), null, null);
- } catch (MapperParsingException expected) {
- assertTrue(expected.getMessage().contains("failed to parse"));
- }
- }
-
- public void testWithCorrectDate() throws Exception {
- checkMeta("htmlWithValidDateMeta.html", Settings.builder().put("index.mapping.attachment.ignore_errors", false).build(), 1354233600000L, 344L);
- }
-}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MultifieldAttachmentMapperTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MultifieldAttachmentMapperTests.java
deleted file mode 100644
index 84284aa812..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/MultifieldAttachmentMapperTests.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.TextFieldMapper;
-import org.junit.Before;
-
-import java.nio.charset.StandardCharsets;
-import java.util.Base64;
-
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.hamcrest.Matchers.instanceOf;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.startsWith;
-
-/**
- *
- */
-public class MultifieldAttachmentMapperTests extends AttachmentUnitTestCase {
-
- private DocumentMapperParser mapperParser;
-
- @Before
- public void setupMapperParser() throws Exception {
- mapperParser = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
-
- }
-
- public void testSimpleMappings() throws Exception {
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/multifield/multifield-mapping.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
-
-
- assertThat(docMapper.mappers().getMapper("file.content"), instanceOf(TextFieldMapper.class));
- assertThat(docMapper.mappers().getMapper("file.content.suggest"), instanceOf(TextFieldMapper.class));
-
- assertThat(docMapper.mappers().getMapper("file.date"), instanceOf(DateFieldMapper.class));
- assertThat(docMapper.mappers().getMapper("file.date.string"), instanceOf(TextFieldMapper.class));
-
- assertThat(docMapper.mappers().getMapper("file.title"), instanceOf(TextFieldMapper.class));
- assertThat(docMapper.mappers().getMapper("file.title.suggest"), instanceOf(TextFieldMapper.class));
-
- assertThat(docMapper.mappers().getMapper("file.name"), instanceOf(TextFieldMapper.class));
- assertThat(docMapper.mappers().getMapper("file.name.suggest"), instanceOf(TextFieldMapper.class));
-
- assertThat(docMapper.mappers().getMapper("file.author"), instanceOf(TextFieldMapper.class));
- assertThat(docMapper.mappers().getMapper("file.author.suggest"), instanceOf(TextFieldMapper.class));
-
- assertThat(docMapper.mappers().getMapper("file.keywords"), instanceOf(TextFieldMapper.class));
- assertThat(docMapper.mappers().getMapper("file.keywords.suggest"), instanceOf(TextFieldMapper.class));
-
- assertThat(docMapper.mappers().getMapper("file.content_type"), instanceOf(TextFieldMapper.class));
- assertThat(docMapper.mappers().getMapper("file.content_type.suggest"), instanceOf(TextFieldMapper.class));
- }
-
- public void testExternalValues() throws Exception {
- String originalText = "This is an elasticsearch mapper attachment test.";
- String forcedName = "dummyname.txt";
-
- String bytes = Base64.getEncoder().encodeToString(originalText.getBytes(StandardCharsets.ISO_8859_1));
-
- MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper());
-
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/multifield/multifield-mapping.json");
-
- DocumentMapper documentMapper = mapperService.documentMapperParser().parse("person", new CompressedXContent(mapping));
-
- ParsedDocument doc = documentMapper.parse("person", "person", "1", XContentFactory.jsonBuilder()
- .startObject()
- .field("file", bytes)
- .endObject()
- .bytes());
-
- assertThat(doc.rootDoc().getField("file.content"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content").stringValue(), is(originalText + "\n"));
-
- assertThat(doc.rootDoc().getField("file.content_type"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content_type").stringValue(), startsWith("text/plain;"));
- assertThat(doc.rootDoc().getField("file.content_type.suggest"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content_type.suggest").stringValue(), startsWith("text/plain;"));
- assertThat(doc.rootDoc().getField("file.content_length"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content_length").numericValue().intValue(), is(originalText.length()));
-
- assertThat(doc.rootDoc().getField("file.content.suggest"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content.suggest").stringValue(), is(originalText + "\n"));
-
- // Let's force some values
- doc = documentMapper.parse("person", "person", "1", XContentFactory.jsonBuilder()
- .startObject()
- .startObject("file")
- .field("_content", bytes)
- .field("_name", forcedName)
- .endObject()
- .endObject()
- .bytes());
-
- assertThat(doc.rootDoc().getField("file.content"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content").stringValue(), is(originalText + "\n"));
-
- assertThat(doc.rootDoc().getField("file.content_type"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content_type").stringValue(), startsWith("text/plain;"));
- assertThat(doc.rootDoc().getField("file.content_type.suggest"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content_type.suggest").stringValue(), startsWith("text/plain;"));
- assertThat(doc.rootDoc().getField("file.content_length"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content_length").numericValue().intValue(), is(originalText.length()));
-
- assertThat(doc.rootDoc().getField("file.content.suggest"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content.suggest").stringValue(), is(originalText + "\n"));
-
- assertThat(doc.rootDoc().getField("file.name"), notNullValue());
- assertThat(doc.rootDoc().getField("file.name").stringValue(), is(forcedName));
- // In mapping we have default store:false
- assertThat(doc.rootDoc().getField("file.name").fieldType().stored(), is(false));
- assertThat(doc.rootDoc().getField("file.name.suggest"), notNullValue());
- assertThat(doc.rootDoc().getField("file.name.suggest").stringValue(), is(forcedName));
- // In mapping we set store:true for suggest subfield
- assertThat(doc.rootDoc().getField("file.name.suggest").fieldType().stored(), is(true));
- }
-
- public void testAllExternalValues() throws Exception {
- String originalText = "This is an elasticsearch mapper attachment test.";
- String forcedName = randomAsciiOfLength(20);
- String forcedLanguage = randomAsciiOfLength(20);
- String forcedContentType = randomAsciiOfLength(20);
-
- String bytes = Base64.getEncoder().encodeToString(originalText.getBytes(StandardCharsets.ISO_8859_1));
-
- MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(),
- Settings.builder().put(AttachmentMapper.INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING.getKey(), true).build(),
- getIndicesModuleWithRegisteredAttachmentMapper());
-
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/multifield/multifield-mapping.json");
-
- DocumentMapper documentMapper = mapperService.documentMapperParser().parse("person", new CompressedXContent(mapping));
-
- ParsedDocument doc = documentMapper.parse("person", "person", "1", XContentFactory.jsonBuilder()
- .startObject()
- .startObject("file")
- .field("_content", bytes)
- .field("_name", forcedName)
- .field("_language", forcedLanguage)
- .field("_content_type", forcedContentType)
- .endObject()
- .endObject()
- .bytes());
-
- // Note that we don't support forcing values for _title and _keywords
-
- assertThat(doc.rootDoc().getField("file.content"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content").stringValue(), is(originalText + "\n"));
-
- assertThat(doc.rootDoc().getField("file.name"), notNullValue());
- assertThat(doc.rootDoc().getField("file.name").stringValue(), is(forcedName));
- assertThat(doc.rootDoc().getField("file.language"), notNullValue());
- assertThat(doc.rootDoc().getField("file.language").stringValue(), is(forcedLanguage));
- assertThat(doc.rootDoc().getField("file.content_type"), notNullValue());
- assertThat(doc.rootDoc().getField("file.content_type").stringValue(), is(forcedContentType));
- }
-}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java
deleted file mode 100644
index 6b80baa7c2..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParseContext;
-
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.startsWith;
-
-/**
- *
- */
-public class SimpleAttachmentMapperTests extends AttachmentUnitTestCase {
-
- public void testSimpleMappings() throws Exception {
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/testXHTML.html");
-
- BytesReference json = jsonBuilder().startObject().field("file", html).endObject().bytes();
- ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc();
-
- assertThat(doc.get(docMapper.mappers().getMapper("file.content_type").fieldType().name()), startsWith("application/xhtml+xml"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.title").fieldType().name()), equalTo("XHTML test document"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().name()), containsString("This document tests the ability of Apache Tika to extract content"));
-
- // re-parse it
- String builtMapping = docMapper.mappingSource().string();
- docMapper = mapperParser.parse("person", new CompressedXContent(builtMapping));
-
- json = jsonBuilder().startObject().field("file", html).endObject().bytes();
-
- doc = docMapper.parse("person", "person", "1", json).rootDoc();
-
- assertThat(doc.get(docMapper.mappers().getMapper("file.content_type").fieldType().name()), startsWith("application/xhtml+xml"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.title").fieldType().name()), equalTo("XHTML test document"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().name()), containsString("This document tests the ability of Apache Tika to extract content"));
- }
-
- /**
- * test for https://github.com/elastic/elasticsearch-mapper-attachments/issues/179
- */
- public void testSimpleMappingsWithAllFields() throws Exception {
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping-all-fields.json");
- DocumentMapper docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/testXHTML.html");
-
- BytesReference json = jsonBuilder().startObject().field("file", html).endObject().bytes();
- ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc();
-
- assertThat(doc.get(docMapper.mappers().getMapper("file.content_type").fieldType().name()), startsWith("application/xhtml+xml"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.title").fieldType().name()), equalTo("XHTML test document"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().name()), containsString("This document tests the ability of Apache Tika to extract content"));
-
- // re-parse it
- String builtMapping = docMapper.mappingSource().string();
- docMapper = mapperParser.parse("person", new CompressedXContent(builtMapping));
-
- json = jsonBuilder().startObject().field("file", html).endObject().bytes();
-
- doc = docMapper.parse("person", "person", "1", json).rootDoc();
-
- assertThat(doc.get(docMapper.mappers().getMapper("file.content_type").fieldType().name()), startsWith("application/xhtml+xml"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.title").fieldType().name()), equalTo("XHTML test document"));
- assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().name()), containsString("This document tests the ability of Apache Tika to extract content"));
- }
-
- /**
- * See issue https://github.com/elastic/elasticsearch-mapper-attachments/issues/169
- * Mapping should not contain field names with dot.
- */
- public void testMapperErrorWithDotTwoLevels169() throws Exception {
- XContentBuilder mappingBuilder = jsonBuilder();
- mappingBuilder.startObject()
- .startObject("mail")
- .startObject("properties")
- .startObject("attachments")
- .startObject("properties")
- .startObject("innerfield")
- .field("type", "attachment")
- .endObject()
- .endObject()
- .endObject()
- .endObject()
- .endObject()
- .endObject();
-
- byte[] mapping = BytesReference.toBytes(mappingBuilder.bytes());
- MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper());
- DocumentMapper docMapper = mapperService.parse("mail", new CompressedXContent(mapping), true);
- // this should not throw an exception
- mapperService.parse("mail", new CompressedXContent(docMapper.mapping().toString()), true);
- // the mapping may not contain a field name with a dot
- assertFalse(docMapper.mapping().toString().contains("."));
- }
-
-}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java
deleted file mode 100644
index b32a6ab79a..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java
+++ /dev/null
@@ -1,65 +0,0 @@
-package org.elasticsearch.mapper.attachments;
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
-import org.apache.lucene.util.TestUtil;
-import org.apache.tika.metadata.Metadata;
-import org.elasticsearch.test.ESTestCase;
-
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-/**
- * Evil test-coverage cheat, we parse a bunch of docs from tika
- * so that we have a nice grab-bag variety, and assert some content
- * comes back and no exception.
- */
-@SuppressFileSystems("ExtrasFS") // don't try to parse extraN
-public class TikaDocTests extends ESTestCase {
-
- /** some test files from tika test suite, zipped up */
- static final String TIKA_FILES = "/org/elasticsearch/index/mapper/attachment/test/tika-files.zip";
-
- public void testFiles() throws Exception {
- Path tmp = createTempDir();
- TestUtil.unzip(getClass().getResourceAsStream(TIKA_FILES), tmp);
-
- try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmp)) {
- for (Path doc : stream) {
- logger.debug("parsing: {}", doc);
- assertParseable(doc);
- }
- }
- }
-
- void assertParseable(Path fileName) throws Exception {
- try {
- byte bytes[] = Files.readAllBytes(fileName);
- String parsedContent = TikaImpl.parse(bytes, new Metadata(), -1);
- assertNotNull(parsedContent);
- assertFalse(parsedContent.isEmpty());
- logger.debug("extracted content: {}", parsedContent);
- } catch (Exception e) {
- throw new RuntimeException("parsing of filename: " + fileName.getFileName() + " failed", e);
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/VariousDocTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/VariousDocTests.java
deleted file mode 100644
index 17e4ec6818..0000000000
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/VariousDocTests.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.mapper.attachments;
-
-import org.apache.tika.io.IOUtils;
-import org.apache.tika.metadata.Metadata;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.junit.Before;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.AUTHOR;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.CONTENT_LENGTH;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.CONTENT_TYPE;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.DATE;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.KEYWORDS;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.LANGUAGE;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.NAME;
-import static org.elasticsearch.mapper.attachments.AttachmentMapper.FieldNames.TITLE;
-import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.hamcrest.Matchers.isEmptyOrNullString;
-import static org.hamcrest.Matchers.not;
-
-/**
- * Test for different documents
- */
-public class VariousDocTests extends AttachmentUnitTestCase {
-
- protected DocumentMapper docMapper;
-
- @Before
- public void createMapper() throws IOException {
- DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser();
-
- String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/unit/various-doc/test-mapping.json");
- docMapper = mapperParser.parse("person", new CompressedXContent(mapping));
- }
-
- /**
- * Test for https://github.com/elastic/elasticsearch-mapper-attachments/issues/104
- */
- public void testWordDocxDocument104() throws Exception {
- assertParseable("issue-104.docx");
- testMapper("issue-104.docx", false);
- }
-
- /**
- * Test for encrypted PDF
- */
- public void testEncryptedPDFDocument() throws Exception {
- assertException("encrypted.pdf", "is encrypted");
- testMapper("encrypted.pdf", true);
- }
-
- /**
- * Test for HTML
- */
- public void testHtmlDocument() throws Exception {
- assertParseable("htmlWithEmptyDateMeta.html");
- testMapper("htmlWithEmptyDateMeta.html", false);
- }
-
- /**
- * Test for XHTML
- */
- public void testXHtmlDocument() throws Exception {
- assertParseable("testXHTML.html");
- testMapper("testXHTML.html", false);
- }
-
- /**
- * Test for TXT
- */
- public void testTxtDocument() throws Exception {
- assertParseable("text-in-english.txt");
- testMapper("text-in-english.txt", false);
- }
-
- /**
- * Test for .epub
- */
- public void testEpubDocument() throws Exception {
- assertParseable("testEPUB.epub");
- testMapper("testEPUB.epub", false);
- }
-
- /**
- * Test for ASCIIDOC
- * Not yet supported by Tika: https://github.com/elastic/elasticsearch-mapper-attachments/issues/29
- */
- public void testAsciidocDocument() throws Exception {
- assertParseable("asciidoc.asciidoc");
- testMapper("asciidoc.asciidoc", false);
- }
-
- void assertException(String filename, String expectedMessage) throws Exception {
- try (InputStream is = VariousDocTests.class.getResourceAsStream("/org/elasticsearch/index/mapper/attachment/test/sample-files/" + filename)) {
- byte bytes[] = IOUtils.toByteArray(is);
- TikaImpl.parse(bytes, new Metadata(), -1);
- fail("expected exception");
- } catch (Exception e) {
- if (e.getMessage() != null && e.getMessage().contains(expectedMessage)) {
- // ok
- } else {
- // unexpected
- throw e;
- }
- }
- }
-
- protected void assertParseable(String filename) throws Exception {
- try (InputStream is = VariousDocTests.class.getResourceAsStream("/org/elasticsearch/index/mapper/attachment/test/sample-files/" + filename)) {
- byte bytes[] = IOUtils.toByteArray(is);
- String parsedContent = TikaImpl.parse(bytes, new Metadata(), -1);
- assertThat(parsedContent, not(isEmptyOrNullString()));
- logger.debug("extracted content: {}", parsedContent);
- }
- }
-
- protected void testMapper(String filename, boolean errorExpected) throws IOException {
- byte[] html = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/attachment/test/sample-files/" + filename);
-
- BytesReference json = jsonBuilder()
- .startObject()
- .startObject("file")
- .field("_name", filename)
- .field("_content", html)
- .endObject()
- .endObject().bytes();
-
- ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc();
- if (!errorExpected) {
- assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().name()), not(isEmptyOrNullString()));
- logger.debug("-> extracted content: {}", doc.get(docMapper.mappers().getMapper("file").fieldType().name()));
- logger.debug("-> extracted metadata:");
- printMetadataContent(doc, AUTHOR);
- printMetadataContent(doc, CONTENT_LENGTH);
- printMetadataContent(doc, CONTENT_TYPE);
- printMetadataContent(doc, DATE);
- printMetadataContent(doc, KEYWORDS);
- printMetadataContent(doc, LANGUAGE);
- printMetadataContent(doc, NAME);
- printMetadataContent(doc, TITLE);
- }
- }
-
- private void printMetadataContent(ParseContext.Document doc, String field) {
- logger.debug("- [{}]: [{}]", field, doc.get(docMapper.mappers().getMapper("file." + field).fieldType().name()));
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/asciidoc.asciidoc b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/asciidoc.asciidoc
deleted file mode 100644
index dc06d4e83d..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/asciidoc.asciidoc
+++ /dev/null
@@ -1,5 +0,0 @@
-[[tika-asciidoc]]
-= AsciiDoc test
-
-Here is a test of the asciidoc format.
-
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/encrypted.pdf b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/encrypted.pdf
deleted file mode 100644
index 569a904a31..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/encrypted.pdf
+++ /dev/null
Binary files differ
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithEmptyDateMeta.html b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithEmptyDateMeta.html
deleted file mode 100644
index f151208e38..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithEmptyDateMeta.html
+++ /dev/null
@@ -1,11 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
-<html lang="fr">
-<head>
- <title>Hello</title>
- <meta name="date" content="">
- <meta name="Author" content="kimchy">
- <meta name="Keywords" content="elasticsearch,cool,bonsai">
-</head>
-<body>World</body>
-</html>
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithValidDateMeta.html b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithValidDateMeta.html
deleted file mode 100644
index 79b5a6234e..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithValidDateMeta.html
+++ /dev/null
@@ -1,11 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
-<html lang="fr">
-<head>
- <title>Hello</title>
- <meta name="date" content="2012-11-30">
- <meta name="Author" content="kimchy">
- <meta name="Keywords" content="elasticsearch,cool,bonsai">
-</head>
-<body>World</body>
-</html>
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithoutDateMeta.html b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithoutDateMeta.html
deleted file mode 100644
index 3322fa3a73..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/htmlWithoutDateMeta.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
-<html lang="fr">
-<head>
- <title>Hello</title>
- <meta name="Author" content="kimchy">
- <meta name="Keywords" content="elasticsearch,cool,bonsai">
-</head>
-<body>World</body>
-</html>
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/issue-104.docx b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/issue-104.docx
deleted file mode 100644
index f126e20b32..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/issue-104.docx
+++ /dev/null
Binary files differ
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testContentLength.txt b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testContentLength.txt
deleted file mode 100644
index d392c2d097..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testContentLength.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Begin
-
-BeforeLimit AfterLimit
-
-Broadway
-
-Nearing the end
-
-End
\ No newline at end of file
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testEPUB.epub b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testEPUB.epub
deleted file mode 100644
index a6fc2e634d..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testEPUB.epub
+++ /dev/null
Binary files differ
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testXHTML.html b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testXHTML.html
deleted file mode 100644
index f5564f025d..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/testXHTML.html
+++ /dev/null
@@ -1,29 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
- <title>XHTML test document</title>
- <meta name="Author" content="Tika Developers"/>
- <meta http-equiv="refresh" content="5"/>
-</head>
-<body>
-<p>
- This document tests the ability of Apache Tika to extract content
- from an <a href="http://www.w3.org/TR/xhtml1/">XHTML document</a>.
-</p>
-</body>
-</html>
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-english.txt b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-english.txt
deleted file mode 100644
index 0828092603..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-english.txt
+++ /dev/null
@@ -1 +0,0 @@
-"God Save the Queen" (alternatively "God Save the King" \ No newline at end of file
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-french.txt b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-french.txt
deleted file mode 100644
index e4619fb1b8..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-french.txt
+++ /dev/null
@@ -1 +0,0 @@
-Allons enfants de la Patrie Le jour de gloire est arrivé. Contre nous de la tyrannie
\ No newline at end of file
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-nolang.txt b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-nolang.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/sample-files/text-in-nolang.txt
+++ /dev/null
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/standalone/standalone-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/standalone/standalone-mapping.json
deleted file mode 100644
index c8680cf064..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/standalone/standalone-mapping.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "person":{
- "properties":{
- "file":{
- "type":"attachment"
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/tika-files.zip b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/tika-files.zip
deleted file mode 100644
index 10f5d50767..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/tika-files.zip
+++ /dev/null
Binary files differ
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/date/date-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/date/date-mapping.json
deleted file mode 100644
index bf5824a6f4..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/date/date-mapping.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "person": {
- "properties": {
- "file": {
- "type": "attachment",
- "fields": {
- "date": { "type": "text" }
- }
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/encrypted/test-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/encrypted/test-mapping.json
deleted file mode 100644
index 7dc796c2b1..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/encrypted/test-mapping.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "person":{
- "properties":{
- "file1":{
- "type":"attachment"
- },
- "file2":{
- "type":"attachment"
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/language/language-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/language/language-mapping.json
deleted file mode 100644
index 5d629c4e1a..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/language/language-mapping.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "person": {
- "properties": {
- "file": {
- "type": "attachment",
- "fields": {
- "language": { "type": "text" }
- }
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/metadata/test-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/metadata/test-mapping.json
deleted file mode 100644
index c8680cf064..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/metadata/test-mapping.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "person":{
- "properties":{
- "file":{
- "type":"attachment"
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/multifield/multifield-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/multifield/multifield-mapping.json
deleted file mode 100644
index b3bd077432..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/multifield/multifield-mapping.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "person": {
- "properties": {
- "file": {
- "type": "attachment",
- "fields": {
- "content": {
- "type": "text",
- "fields": {
- "suggest": { "type": "text" }
- }
- },
- "date": {
- "type": "date",
- "fields": {
- "string": { "type": "text" }
- }
- },
- "title": {
- "type": "text",
- "fields": {
- "suggest": { "type": "text" }
- }
- },
- "name": {
- "type": "text",
- "fields": {
- "suggest": {
- "type": "text",
- "store": true
- }
- }
- },
- "author": {
- "type": "text",
- "fields": {
- "suggest": { "type": "text" }
- }
- },
- "keywords": {
- "type": "text",
- "fields": {
- "suggest": { "type": "text" }
- }
- },
- "content_type": {
- "type": "text",
- "fields": {
- "suggest": { "type": "text" }
- }
- }
- }
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping-all-fields.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping-all-fields.json
deleted file mode 100644
index feaa3a5455..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping-all-fields.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "person":{
- "properties":{
- "file":{
- "type":"attachment",
- "fields" : {
- "content" : {"store" : true},
- "title" : {"store" : true},
- "date" : {"store" : true},
- "author" : {"analyzer" : "standard"},
- "keywords" : {"store" : true},
- "content_type" : {"store" : true},
- "content_length" : {"store" : true},
- "language" : {"store" : true}
- }
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping.json
deleted file mode 100644
index c8680cf064..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/simple/test-mapping.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "person":{
- "properties":{
- "file":{
- "type":"attachment"
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/various-doc/test-mapping.json b/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/various-doc/test-mapping.json
deleted file mode 100644
index c8680cf064..0000000000
--- a/plugins/mapper-attachments/src/test/resources/org/elasticsearch/index/mapper/attachment/test/unit/various-doc/test-mapping.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "person":{
- "properties":{
- "file":{
- "type":"attachment"
- }
- }
- }
-}
diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/00_basic.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/00_basic.yaml
deleted file mode 100644
index 9654535f2a..0000000000
--- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/00_basic.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Integration tests for plugin: check name is correct
-#
-"Mapper attachments loaded":
- - do:
- cluster.state: {}
-
- # Get master node id
- - set: { master_node: master }
-
- - do:
- nodes.info: {}
-
- - match: { nodes.$master.plugins.0.name: mapper-attachments }
diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/10_index.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/10_index.yaml
deleted file mode 100644
index 5d95ce425c..0000000000
--- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/10_index.yaml
+++ /dev/null
@@ -1,148 +0,0 @@
-# Integration tests for Mapper Attachments plugin
-#
-
----
-# https://github.com/elastic/elasticsearch-mapper-attachments/issues/23
-"Index empty attachment":
-
- - do:
- indices.create:
- index: test
- body:
- mappings:
- doc:
- properties:
- file:
- type: attachment
- - do:
- catch: /(.)*mapper_parsing_exception.+No content is provided\.(.)*/
- index:
- index: test
- type: doc
- id: 1
- body:
- file: { }
-
----
-# https://github.com/elastic/elasticsearch-mapper-attachments/issues/18
-# Encoded content with https://www.base64encode.org/
-# File1
-#<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-# "http://www.w3.org/TR/html4/loose.dtd">
-#<html lang="fr">
-#<head>
-# <title>Hello</title>
-# <meta name="date" content="2012-11-30">
-# <meta name="Author" content="kimchy">
-# <meta name="Keywords" content="elasticsearch,cool,bonsai">
-#</head>
-#<body>World</body>
-#</html>
-# File2 is an encrypted PDF with a password
-
-"Multiple Attachments With Encrypted Doc Ignore Failures":
-
- - do:
- indices.create:
- index: test
- body:
- settings:
- index.mapping.attachment.ignore_errors: true
- mappings:
- doc:
- properties:
- file1:
- type: attachment
- file2:
- type: attachment
-
- - do:
- index:
- index: test
- type: doc
- id: 1
- body:
- file1: "PCFET0NUWVBFIEhUTUwgUFVCTElDICItLy9XM0MvL0RURCBIVE1MIDQuMDEgVHJhbnNpdGlvbmFsLy9FTiINCiAgICAgICAgImh0dHA6Ly93d3cudzMub3JnL1RSL2h0bWw0L2xvb3NlLmR0ZCI+DQo8aHRtbCBsYW5nPSJmciI+DQo8aGVhZD4NCiAgICA8dGl0bGU+SGVsbG88L3RpdGxlPg0KICAgIDxtZXRhIG5hbWU9ImRhdGUiIGNvbnRlbnQ9IjIwMTItMTEtMzAiPg0KICAgIDxtZXRhIG5hbWU9IkF1dGhvciIgY29udGVudD0ia2ltY2h5Ij4NCiAgICA8bWV0YSBuYW1lPSJLZXl3b3JkcyIgY29udGVudD0iZWxhc3RpY3NlYXJjaCxjb29sLGJvbnNhaSI+DQo8L2hlYWQ+DQo8Ym9keT5Xb3JsZDwvYm9keT4NCjwvaHRtbD4NCg=="
- file2: "%PDF-1.4
%äüöß
2 0 obj
<</Length 3 0 R/Filter/FlateDecode>>
stream
0~k?????l5?7,>??˟Xje?.L?K1-?&?[D3?HV???v?`v?WqDNA?i<ZM??MQ?w??+T?f&??NH?xY??D???}@ ?IgҞa?u???D??=DO?H??o?o???y?QY?t???8??kc?yf?????n?Mf|Ezu\&?Oy?j,?=x_)??K0m
endstream
endobj

3 0 obj
186
endobj

4 0 obj
<</Type/XObject
/Subtype/Form
/BBox[ -9 420 604 420.1 ]
/Group<</S/Transparency/CS/DeviceRGB/K true>>
/Length 8
/Filter/FlateDecode
>>
stream
?? ?
ۉ
endstream
endobj

5 0 obj
<</CA 0.5
   /ca 0.5
>>
endobj

7 0 obj
<</Length 8 0 R/Filter/FlateDecode/Length1 19920>>
stream
?G?ˊ?e?Q?Q??0+?3B?7M??M?
_w?;4??L??A??7??µ???s?9OS$<]?ղ?C?܎OYc???R??!6???Ik??u???
                                                        {???????y?v?? )t"?l7?????mk??ۮ??sU#7??3?P??u?U?,?\)??z?J?=?? ??:@?".væ2?凚?????u]??e?(???O??qA?nXs|8?u?@{??,yX?Nc؄a˷????b_K?&??????A2?*(???y????m??N??l?
??ɱX??????V/kytlK?y\k?w?L6Q??C!???[?,???f?:?f?  ?.o?a?
?z?в?????L6QWi???(??:1??mwx?r$??RY??+?T!Á,?-?????K?`۝???|=>???o???TaF?|,N=???N?r?d?B?
TP??͛?Y?ٓV&???]ɐ`"?ㅰ??-?k|2+?sP:??M?O?{??OV?Ԧ??}???????R?
                                     s
?w?j??P?Mw??O?q?|?ѫ?
                    ?
                     |A4?Th???vk*??m???
                                       ??????:*
?޺BO[?8?,??A???X?       ??[????Z?{yCr?~;|M??c?Bø1G?????֡??~??oC??F?q8?e???q??kX?n?T?	??iۙ릚??(	g?J??x??ԋ
?L0??7sx?t~???$?????*???V??CV:?>??a??k?q?s???ܓZ)???Ə?s??̾m:`֗Eέe??t۞??=K0(?j?}??z??4@?`??7?O[Y?,?/W0??b??1Q?B<??
??F?/??3?ŋ?1???᧩???ګKk ^??YT???#.nY??Dg??X????ě????!m???F
                                                         ?Iܙ?wϲO?ĭO?qR?O3??\?2x?>k/FJa?'SuY	4u~?5j?a??i߅𸳋J?΃??{? ??ue?ďj??I????I?e.??O?Ą?UE{?9A??ct?zN_??s????hP?????WM??[(???7?<4Z???z?(???6??ƪB?@?8R?Q?ۨ2y?.!A?
?4?A*7?|?Ղ?_|? ?ڵ?雈?F??6?B>???$??`???P@\????k?܂?
                                                 ?L??0????z?\???????W?IR$~??g?{-o?ҧR?hq??Wk?ic?4"??B4?$ ?X??n??f쐨r?u.??2!?4?H????3?DH???]?ب?'?bw??#??F8?-?ӈ?Ȧ?w?WC????(z?
\?UΠ??q?/???0
             ?1cW?(?&5}??\V?N3??_????@!??~??6QS????????Gtr2??	Z?N??3
                                                                      ?\?*?R????(?????1?%?Z\??4?u?;?z??W?(Ώ??י????9<?3??~??1
??եo??aۇ.?tv<&??-w?oh?ӯ?:??2??)'??1?'?	|?#Sӵ:???6܇?2?A?*?41M??j??b???jAZT??6I5???:ͽ?? ???c??P?
               ???6/{̣S]-?%??/????c1;?j#$#d?	???dm??-7?v????y?~n?tM?p7懑!|?2??4??4F?"?ʌ7?p?֏}.?S?#(
                      N???
                          =<??x0??!?Je???m???ጱ?H?n?P?J(?qTz	??O???OӼ9?լ???(f?K?G@???{??!iB?^_2v?TV28?H?.?X??{????C?F<iх߬7,??r?ц??_S:m??H??_R?3h	?g?lU6?w????U?????v??????Xy!C?n$???
                           ?r????J鮻'???+)*L9S???????/?C?????Xme????x??\?5%"[~'??Dr??%}?Hu_GI?AAO?ճ???k??/?z@??aM?(?Rb??????'%E
                                               Y=??Ī???jRy?PYK????{w]}?P?ك????(?!????31??
         t????T???v?x=???????R?3???-*?nHy??0͓?rQ??ݕZ?n???a?w?j?Ѓ? ??Q?/!u?f]g%?X?t{?=i`?z?C?m]?Sd=?p?%?h?
                        ?\?????e???b٪k~??z7ފ?t?"9????*3??y?Sh?? g???H?k?Y????GP4??z?t?`CQ?瀏??^?Q?u??NJ?c[4?q?ѕ?y?R??'?or?!?%??`L??bc?Q????q?p?d?T?;????w?????]BJ.?Ϥ?sm?U
?v?9o??=({?=??p?fjw????O???????Y???vp?L?~??\֋????i~?$????>???	?կI?w??k|?&V
                                                  ?~R?u?|&?Ba??VV?????G+?E$?kJ?9>?uwOH?D??????a?-8?????p?1?^D7?sz?[?+RSZ=??9?N?Pj?G?{???M4
z???h}?(z?8?8????ތ.?'??'?QR??@??$??$X
???
   ???<??XI??h?s?p<?t????4a?zHE-?{U??)??ԫVtX$f???j?"/??mL}?eܹ#R?C?????B1??d?eo?
                                                                               u+>t>ԃtK?Q????m\????Oɑ1?aN>?/4pnU
                                ;>?*f?<?mZI??b?1@?h??!u?^?|̮?,+ma??00'?z?[???]\?`ZMP"?iA????삑4jE3?u?Tr??p?PR???N?e$~g??p???NGn???S}?q?R??Kk??/~??ڶ?I3??ݤ_:?
                                                                           B?\?N?t>hi??;??jk,g?˴??z?ź!??wf?????Y{{????.?p>o(xnzۨEm~3B???o.????;????	?{3??K?"&/U"?y???U????
              ??dM????U+??.?s?wSͻ@?
                                   ?1&???h?H?[?J7?f???ř?3?x24?8dwg??&???0yJ?C??%;ʍn?or?Mm?	Ðh???Ma,?!???l
y*?x?7?jZ"??<_??z?@??#??H?=?Է?P?W9[?\??????Hn?q=D?ԯ?????5-[???l???{_f??<?t5֮??ܼ???J?]?H|??"?$????n???'????7!,vQ??̊??<
                                  *?^qa?j?]??D<ח??Ą??YF???Eޞ!0?SV??m???p^??0?^??7?҈?`???5f????9M??[???n?z?/??
                             0??<rH????~??	S-04|u??ZL??,???Η7L-?Ǡ??!2??????"?zů?J?]w??????vcU??? ?p???:X핎:W?l?#@j??.??????^????????dN3]G-fO{t?=?j9 H4?Vr?????=??h'$?|?IG??u?;s5???/ʰ?|???6?p???RV?W?g?,??Z{5?zs!o? t+?)?WE?
???*??'ɞ?a??????@_!c????&dǎe"?E??8??oe?????`???o???:?)2??+??q????-?-Hp??ܰn??x!??0?By?ʣfO?tJ???NR?6?bV,vʺ3?
                     ?.n@`?Vs;??B??i???(?G?5???_??Yr?A?????)?K&?٭???;8??????u???P?n6"*6׷??b?bIK?f&???2(?+??f\1?z]?%?Xˠ?H/????E???@P̣?W??7?E??Pn??????8+O???fz?*&??ҟ?€?N?O)$???{d?h?<??ԷZeT???????'???"???S
                                          ?N????-z9??ձ3?'?d91????ҫ?
                                                                   ?oc??D2ՐLB??A?!??a?ȹ?x?h??G?H̟????Q??????2?zҳ?}?$'??????J???4/????@U????d??????^|?n??Aח??S?<???zgyz?_y??w?1?5??????9?Ös??.+o???YK?#???Ԙ???g?YO???
                                                   e??2??v?֧=?????<%?<?f?Yq?9?4;[΄?\?W????1ڜ??:tʅ?0????\???n?L??t?e??????-t??.????d)a???<???ot?]
                                                               ;?BQ&Tށ?gf9?#v͇@??BF8?jf?n?%K?T}x~U?????-5!?c??q+?f?t4ffmA)?#??"?v?-[>???~<Ԯ????r
                                                               ?f?????~??????'???.???M ????bv	??R??*?Ĉ?v_???նN??b?{k??0??BU?a? ?+??!AQ?5=|?A`"??|n?L????My.(????r*???%I"??=-^?M??#??????~?0MǷɩ???E??r???d?P.K???l"?lP]??$?C???J	}
N?????9?lG???l̂?iU7 &?D(l??H???m???W#?̨?+ۙJ?y:7?@?|W}?a???tyh??Z????a??z?)?????m?4%?7?ҵZ؅?K??X?~ӧ?????????ѵ???53*9د?׉
D*?7#k?w)?ɫR?
?????o'?w'x??;x??5????/??أAD2??P??????#LZ?.??Pé?24???^:?Ҫ???V?ۍ??-yg??o???$??
/?|S\?9T???9_????Z???ȱ!p?tU??F?!˴E??X?cIU?%x???3??YX:w?0???w?W?2
                                                                *
                                                                 ?ӣ??'?'`??${????i0?BJ?_?Ld?yY??????	?[?Qކx(\֘6??"?P%?*???-??h??n???>???????5.3??9???=??O??F%?C(ヮ??m??EN?UuP%co??:?Jyӹ??8??U???y,ZU????`?;????˝^#wb:iͰ?bQ?	?n??۩?X?0??#i?o?.?A??6B!??ߏ%??s???nݵ?y8?,?jk?|??{?V?<?ַ?ߊ?.??`@vV҂??"j??G?P?b?nt?փ????#?&H??.?G?hD??ƨ"??%?%????G3???{?w7??ݓG'?
                              ?w?????Ib`?Y?:?&Vmr??siX????n??_???4jy??KE*?ΘRx?x?;#23d$5??p+2"?\?<=$ɧ?YG???_M%??eB|JLBiւ?6?C՝?TvV_?r???s?ڤL?ò\[????|ml?	???c??3??????m?'"?>?{vԙ
               ?L??Ju2?b?`?i?f?<??`_??^?aU??ت?!?K??W`??@?D	?O?~??P~???gT????ne?o?璄???>??{$??tvj?s?#7?K?&6?֥?j??ݎ?x?&oc?/?r????%????p>G?%?H*&????4?!QC???Ϩ??dE???>Q?!?TSAJ\?bt??\ ?q8?!??Ԍ???d??*
??(l?Y?*??uS?]F)???+???????ߊײqI[?w??ZE?I38WNY??xs???Ĕ|??]IU8.7??/????????i?Ј??	Yxwh???Q?bYЄ?y???H}?O???Nځ??׻`???? ???V?
??ILF??G:;#F6&?v*?GW<??AH??o0??2a?????S)T????|KU???T	7?z????¶uI??NJr???ܬ?"=??B?q??R.?
          f??q?1?1?	?????>$|?.?ju?s`p?qS?Ց	?n?e????Nv:?˵????,?
                                                                   =\??%????X?˄?Q???Ǎ??WtՐ??w??|????-8f4??޺?|??{i?WA)?????+'??'?@:Mׁ??9Qou?'?S5eD?T7???A???mdv??"?p?rOhF?$I?߫2?%Y`??????auy?͗*T?G>?????]?d0?Y4>8]??e??CX????H?ؑY?Ԕ??3?;?|??vM??z???骛???u???ܳ?A4?????h??e?ћȉ?????j????(Wd???iBQP? D???疷???Ջ???C\5?/Av?fp???o@??????oD$???7???Xu?+
              톱X?Q?;\Q????[??ا?ؗ?w
v?xS??`?%?o???4??ZD?2?fc??.?Dz???Uo?^>?&??u??H?ڣ??&
                                                   ~?????=z?gGT??b???s<'??m?kx?R?#y?6?????FP4<??A?h??,??{1??DgEpF???	??u&???nv޽6????l7?UOt?*?e???Wq??^??+Wz??1?m?(\ZB?v=t/M(??$?_?5??L~?????-D\RV?}ܒ??4??
??
?!???w(Ƀ????:7?2{G??(?f??,M????0?rjKGX܄0U?}???Ӌ?3?l?p?w
                                                       ?=e6???2fR?"+h
                                                                     KP?'??]&(???v?@l???rx?vhRrc?T%??E޼k??+???6?Ri>:g????3??????_V??fX?????l?אָ%???$1?
"2???5GJ??O6???1?Ow???
hQH??)e4???=\?L?/?!A?=?^?$??e?????r>ka?O?j3?)?????eA?۸???!?/?]o?b?G?C#nk??1Lb?X??h???????8&?q?q!
?o??Y9( ?H@?!\?r ?G????+
                    .0߃+???-?3$i???D?A??2h??v?`?-?{?'2?XGr???H!uqy^cd?c?????*H6ڃ3?3???I?
        ??'/M ?Q??K????*
                         ?
8?l?Ţ????q<˟y??(??x???r?l??
??SA?|z9?g/8Mf?k&??*:?Wk?????B??N????ۇ^?H?\0r??\????Ǡ????!R??6_?	$?3YY'm??R?`b?e??g:?ud?_???9~???*?hHߡ???5??AOO?4??Mp?u?2h??^?{baT????S?"b????k?l??????sp?`yO?.*?s?0??q?7?5߅Fl?`m??<?ͅ?Mi????T??G?????%vG}?5ͧ????x??}@%???gSvlݍ?#.?? ?lid??4T?+?????????V?.???Z??	!???s??F6
Y
 ?1??}
?m?aq?/@?X;G?a?6m?p?UfqZ?I?L?'S???%??9???7?NN??p?A$mJ?X???2ͩ??l?{*??G̫???Ȟ
                                                        ???H??H?qQ???Ck<y?j??;
                                                                              Q9?s????z!]չmaL??1?k??Ȳ????x?1?D?}?5?? ????????]u???!O?P?8M??2?P???9(C?!?[pJ??ɒs?e?<?V??v?E?6'??U?$?F?V?P;??j?~???'V?ud?oq???b???rq????'?N?Г8{ҥ?0?Q\ނOm?'?=O?j#??'?????-*䫦??9?%X??????????kS㘝$$?Q?Ƽ??!̈?Qzf??ј???KQ?J1?????r?o?ք?nKy?5~??Yޔ??M?v???x??a<??]?S?0?}a?u?:]~K3?rhy?*V??aj?v2T?[??????M?S?????tD6?Kkw????߄r
                                                                     P"????k??b?Q{???????Z??
p??0?7?????5Q?Bӎ?:j??>)?r???9
                             ? ???˶?x?T_z?|N???kUK?i?s?nG?jp???!?@7}????nO?????v:&?3gNv?L?,zd??E?k?&wy??ي????|?????????7????
?s?????U?????V??Tw+\Ҧ*??zѐ?+?3??@3X???(?tS??R?s??/?Iч?f?jS۔?
                                                            n??{??h?/??Ȫ	???+&F?D҈??M?D;?^8??TP????+?????|2?S??{??2E??)??^skcx?f??U?EB]]:?P?pf??}?𹌱q%?ns?_F[?qFғ[y0Y??gGO???)kZi?}N]???H??ej)??:̙?&I???9?i?i!?_?poG?2r?*Y?u??$Ȍ׹YRǉ???T?O???&%>?ӄ?0Ԑ0(0O?c?B	???s??G]?7<FŇ9tczܖ??]??㼈?N	?????%??Hξ??????+CRQ?l?IFI?%g}{nb?8?!?P?????w???ؾ!+Мn???)????u?&?EƬ??0?owT?}?ƴD?]?Q?[?|(b??X?c??vW-??R?>0x?g??ƐJ?/1?#?w?Q??N?K:???5?~????dGo?	ć??$??O?R';4??AMb???'A_;??K?^&?*?Fi"?\??yV?;K$[???>??/Q?E-??W?u;<P3|9Y?C?r??:?1?:;$*݄?Ѯ????_ِc????3??gr???@??:??1???
?!?P_܁x#SC`?=@U?Irj???ǒ?8-z???;??b
?$+??sh???e.#݄
.%3??P?ǽ|gg?J??-`?z]???z)h}G/???W? ?q?G?':??
                       &1??
                           ?dq??r?pc?ƁnR??1M
                                            ?????7r<?%W?T?????l?) ?/ʘWv???6??ܹ??\u?x?;'???? ~_ȓݳ?2DƟ4???@??l=Z????/:v??2???x???ڌ@??[n??"???(~-)?x#f??\???p?'~??wv#3?ɱ??(??d?%
??K?֔n????J?:=r????&??Qz??3?9?{?????*P???`m??1?
????l>?L??1?s??W??_?}cS?Mp?N/<k?hVuJ?ZR????+????O?xݲxH?G??@?Z.?t?t|/?4?OX9??%H??ܭ??ԟ?????~?\???Ty???֩e????O??i‘??P??v?a????棱?l-lR??\???*?
                                                         rI??԰??:F?C???k;??4{Қ?Q?|???D
       ???0??%U?E???*2???94???/??Sp???H$?༈?@[q??#D??:?
endstream
endobj

8 0 obj
11872
endobj

9 0 obj
<</Type/FontDescriptor/FontName/BAAAAA+CourierNewPSMT
/Flags 5
/FontBBox[-121 -679 622 1021]/ItalicAngle 0
/Ascent 832
/Descent -300
/CapHeight 1020
/StemV 80
/FontFile2 7 0 R
>>
endobj

10 0 obj
<</Length 314/Filter/FlateDecode>>
stream
1??E?2q!?5??Μ?Ȣ??? Q???Lx}z?3????c?E?Gm+6T?#?????U???g?Kߨ?[̽M?;İ??ϩl??g?@?7?zU>|???p?;??(r,6??vL?rj????5?~>
                          ?U͹?y%??w['?~
                                       DR??敠??|3?????Eܟ&Ҁ?"N3???I΅?j?/??y?t????
礌mP??2ن?? Av???֍??)???????V]?=?C?H?nmv???
t??"?                                     L'^??
     y???_??b?~??N?-3?J?Xih4>??M????	
endstream
endobj

11 0 obj
<</Type/Font/Subtype/TrueType/BaseFont/BAAAAA+CourierNewPSMT
/FirstChar 0
/LastChar 20
/Widths[600 600 600 600 600 600 600 600 600 600 600 600 600 600 600 600
600 600 600 600 600 ]
/FontDescriptor 9 0 R
/ToUnicode 10 0 R
>>
endobj

12 0 obj
<</F1 11 0 R
>>
endobj

13 0 obj
<</Font 12 0 R
/XObject<</Tr4 4 0 R>>
/ExtGState<</EGS5 5 0 R>>
/ProcSet[/PDF/Text/ImageC/ImageI/ImageB]
>>
endobj

1 0 obj
<</Type/Page/Parent 6 0 R/Resources 13 0 R/MediaBox[0 0 595 842]/Group<</S/Transparency/CS/DeviceRGB/I true>>/Contents 2 0 R>>
endobj

6 0 obj
<</Type/Pages
/Resources 13 0 R
/MediaBox[ 0 0 595 842 ]
/Kids[ 1 0 R ]
/Count 1>>
endobj

14 0 obj
<</Type/Catalog/Pages 6 0 R
/OpenAction[1 0 R /XYZ null null 0]
/Lang(J?@n?)
>>
endobj

15 0 obj
<</Creator<D79C55D597C05122BBA77C6422F8>
/Producer<D79C55CE97DB5129BBA17C6422C5663ECCB5C3497D5DCB00ABD3B22A86C149D3>
/CreationDate(mYg???as??M9?Vk???M?)>>
endobj

16 0 obj
<</Filter/Standard/V 2/Length 128/R 3/O(?|A:m?p!S?3?Gх??H???ΧW;?T?\b)/U(??<[?NVc?п??}?)/P -1028>>
endobj

xref
0 17
0000000000 65535 f 
0000013470 00000 n 
0000000019 00000 n 
0000000276 00000 n 
0000000296 00000 n 
0000000473 00000 n 
0000013613 00000 n 
0000000513 00000 n 
0000012470 00000 n 
0000012492 00000 n 
0000012687 00000 n 
0000013071 00000 n 
0000013312 00000 n 
0000013345 00000 n 
0000013712 00000 n 
0000013809 00000 n 
0000013984 00000 n 
trailer
<</Size 17/Root 14 0 R
/Encrypt 16 0 R
/Info 15 0 R
/ID [ <52A9A94A6311847A966B5621D77A4DD3>
<52A9A94A6311847A966B5621D77A4DD3> ]
/DocChecksum /4A8590018ADCF5BB4BA4541CAACB1A4E
>>
startxref
14122
%%EOF"
- hello: "world"
-
- - do:
- indices.refresh: {}
-
- - do:
- search:
- index: test
- body:
- query:
- match:
- hello: "world"
-
- - match: { hits.total: 1 }
-
- - do:
- search:
- index: test
- body:
- query:
- match:
- file1.author: "kimchy"
-
- - match: { hits.total: 1 }
-
-#---
-# This test has been disabled as it tries to decode an encoded PDF using BouncyCastle lib
-# Elasticsearch security manager does not allow permission java.security.SecurityPermission "insertProvider.BC";
-# See https://github.com/elastic/elasticsearch/pull/13077
-# See https://github.com/elastic/elasticsearch-mapper-attachments/pull/150#issuecomment-134247110
-#
-# https://github.com/elastic/elasticsearch-mapper-attachments/issues/18
-# Encoded content with https://www.base64encode.org/
-# File1
-#<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-# "http://www.w3.org/TR/html4/loose.dtd">
-#<html lang="fr">
-#<head>
-# <title>Hello</title>
-# <meta name="date" content="2012-11-30">
-# <meta name="Author" content="kimchy">
-# <meta name="Keywords" content="elasticsearch,cool,bonsai">
-#</head>
-#<body>World</body>
-#</html>
-# File2 is an encrypted PDF with a password
-
-#"Multiple Attachments With Encrypted Doc Should Fail":
-#
-# - do:
-# indices.create:
-# index: test
-# body:
-# settings:
-# index.mapping.attachment.ignore_errors: false
-# mappings:
-# doc:
-# properties:
-# file1:
-# type: attachment
-# file2:
-# type: attachment
-#
-# - do:
-# catch: /(.)*mapper_parsing_exception(.)*The supplied password does not match either the owner or user password in the document\.(.)*/
-# index:
-# index: test
-# type: doc
-# id: 1
-# body:
-# file1: "PCFET0NUWVBFIEhUTUwgUFVCTElDICItLy9XM0MvL0RURCBIVE1MIDQuMDEgVHJhbnNpdGlvbmFsLy9FTiINCiAgICAgICAgImh0dHA6Ly93d3cudzMub3JnL1RSL2h0bWw0L2xvb3NlLmR0ZCI+DQo8aHRtbCBsYW5nPSJmciI+DQo8aGVhZD4NCiAgICA8dGl0bGU+SGVsbG88L3RpdGxlPg0KICAgIDxtZXRhIG5hbWU9ImRhdGUiIGNvbnRlbnQ9IjIwMTItMTEtMzAiPg0KICAgIDxtZXRhIG5hbWU9IkF1dGhvciIgY29udGVudD0ia2ltY2h5Ij4NCiAgICA8bWV0YSBuYW1lPSJLZXl3b3JkcyIgY29udGVudD0iZWxhc3RpY3NlYXJjaCxjb29sLGJvbnNhaSI+DQo8L2hlYWQ+DQo8Ym9keT5Xb3JsZDwvYm9keT4NCjwvaHRtbD4NCg=="
-# file2: "%PDF-1.4
%äüöß
2 0 obj
<</Length 3 0 R/Filter/FlateDecode>>
stream
[186 bytes of FlateDecode-compressed binary stream data; unrenderable as text, omitted]
endstream
endobj

3 0 obj
186
endobj

4 0 obj
<</Type/XObject
/Subtype/Form
/BBox[ -9 420 604 420.1 ]
/Group<</S/Transparency/CS/DeviceRGB/K true>>
/Length 8
/Filter/FlateDecode
>>
stream
[8 bytes of FlateDecode-compressed form-XObject stream data; unrenderable as text, omitted]
endstream
endobj

5 0 obj
<</CA 0.5
   /ca 0.5
>>
endobj

7 0 obj
<</Length 8 0 R/Filter/FlateDecode/Length1 19920>>
stream
[11872 bytes of FlateDecode-compressed embedded TrueType font data (BAAAAA+CourierNewPSMT, uncompressed Length1 19920); unrenderable as text, omitted]
endstream
endobj

8 0 obj
11872
endobj

9 0 obj
<</Type/FontDescriptor/FontName/BAAAAA+CourierNewPSMT
/Flags 5
/FontBBox[-121 -679 622 1021]/ItalicAngle 0
/Ascent 832
/Descent -300
/CapHeight 1020
/StemV 80
/FontFile2 7 0 R
>>
endobj

10 0 obj
<</Length 314/Filter/FlateDecode>>
stream
[314 bytes of FlateDecode-compressed ToUnicode CMap stream data; unrenderable as text, omitted]
endstream
endobj

11 0 obj
<</Type/Font/Subtype/TrueType/BaseFont/BAAAAA+CourierNewPSMT
/FirstChar 0
/LastChar 20
/Widths[600 600 600 600 600 600 600 600 600 600 600 600 600 600 600 600
600 600 600 600 600 ]
/FontDescriptor 9 0 R
/ToUnicode 10 0 R
>>
endobj

12 0 obj
<</F1 11 0 R
>>
endobj

13 0 obj
<</Font 12 0 R
/XObject<</Tr4 4 0 R>>
/ExtGState<</EGS5 5 0 R>>
/ProcSet[/PDF/Text/ImageC/ImageI/ImageB]
>>
endobj

1 0 obj
<</Type/Page/Parent 6 0 R/Resources 13 0 R/MediaBox[0 0 595 842]/Group<</S/Transparency/CS/DeviceRGB/I true>>/Contents 2 0 R>>
endobj

6 0 obj
<</Type/Pages
/Resources 13 0 R
/MediaBox[ 0 0 595 842 ]
/Kids[ 1 0 R ]
/Count 1>>
endobj

14 0 obj
<</Type/Catalog/Pages 6 0 R
/OpenAction[1 0 R /XYZ null null 0]
/Lang(J?@n?)
>>
endobj

15 0 obj
<</Creator<D79C55D597C05122BBA77C6422F8>
/Producer<D79C55CE97DB5129BBA17C6422C5663ECCB5C3497D5DCB00ABD3B22A86C149D3>
/CreationDate(mYg???as??M9?Vk???M?)>>
endobj

16 0 obj
<</Filter/Standard/V 2/Length 128/R 3/O(?|A:m?p!S?3?Gх??H???ΧW;?T?\b)/U(??<[?NVc?п??}?)/P -1028>>
endobj

xref
0 17
0000000000 65535 f 
0000013470 00000 n 
0000000019 00000 n 
0000000276 00000 n 
0000000296 00000 n 
0000000473 00000 n 
0000013613 00000 n 
0000000513 00000 n 
0000012470 00000 n 
0000012492 00000 n 
0000012687 00000 n 
0000013071 00000 n 
0000013312 00000 n 
0000013345 00000 n 
0000013712 00000 n 
0000013809 00000 n 
0000013984 00000 n 
trailer
<</Size 17/Root 14 0 R
/Encrypt 16 0 R
/Info 15 0 R
/ID [ <52A9A94A6311847A966B5621D77A4DD3>
<52A9A94A6311847A966B5621D77A4DD3> ]
/DocChecksum /4A8590018ADCF5BB4BA4541CAACB1A4E
>>
startxref
14122
%%EOF"
-# hello: "world"
-#
-# - do:
-# indices.refresh: {}
-#
-# - do:
-# search:
-# index: test
-#
-# - match: { hits.total: 0 }
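Context for the disabled "Multiple Attachments With Encrypted Doc" test above: per the linked PRs, decrypting the embedded PDF leads Tika/PDFBox to register BouncyCastle as a JCE security provider at runtime, and that registration is exactly what the security manager rejects. A minimal, illustrative sketch of the offending call, assuming bcprov on the classpath (not code from this repository):

import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public class RegisterBcSketch {
    public static void main(String[] args) {
        // Under the Elasticsearch security manager this throws
        // java.security.AccessControlException, because the plugin policy does
        // not grant java.security.SecurityPermission "insertProvider.BC".
        Security.addProvider(new BouncyCastleProvider());
    }
}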
diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/20_search.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/20_search.yaml
deleted file mode 100644
index bc178b1789..0000000000
--- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/20_search.yaml
+++ /dev/null
@@ -1,111 +0,0 @@
-# Integration tests for Mapper Attachments plugin
-#
-
-setup:
- - do:
- indices.create:
- index: test
- body:
- mappings:
- doc:
- properties:
- file:
- type: attachment
-
----
-# Encoded content with https://www.base64encode.org/
-#<html xmlns="http://www.w3.org/1999/xhtml">
-#<head>
-# <title>XHTML test document</title>
-# <meta name="Author" content="Tika Developers"/>
-# <meta http-equiv="refresh" content="5"/>
-#</head>
-#<body>
-#<p>
-# This document tests the ability of Apache Tika to extract content
-# from an <a href="http://www.w3.org/TR/xhtml1/">XHTML document</a>.
-#</p>
-#</body>
-#</html>
-
-"Mapper Attachment Simple":
-
- - do:
- index:
- index: test
- type: doc
- id: 1
- body:
- file: "PGh0bWwgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGh0bWwiPg0KPGhlYWQ+DQogICAgPHRpdGxlPlhIVE1MIHRlc3QgZG9jdW1lbnQ8L3RpdGxlPg0KICAgIDxtZXRhIG5hbWU9IkF1dGhvciIgY29udGVudD0iVGlrYSBEZXZlbG9wZXJzIi8+DQogICAgPG1ldGEgaHR0cC1lcXVpdj0icmVmcmVzaCIgY29udGVudD0iNSIvPg0KPC9oZWFkPg0KPGJvZHk+DQo8cD4NCiAgICBUaGlzIGRvY3VtZW50IHRlc3RzIHRoZSBhYmlsaXR5IG9mIEFwYWNoZSBUaWthIHRvIGV4dHJhY3QgY29udGVudA0KICAgIGZyb20gYW4gPGEgaHJlZj0iaHR0cDovL3d3dy53My5vcmcvVFIveGh0bWwxLyI+WEhUTUwgZG9jdW1lbnQ8L2E+Lg0KPC9wPg0KPC9ib2R5Pg0KPC9odG1sPg=="
-
- - do:
- indices.refresh: {}
-
- - do:
- search:
- index: test
- body:
- query:
- match:
- file.title: "test document"
-
- - match: { hits.total: 1 }
-
----
-# Encoded content with https://www.base64encode.org/
-#Begin
-#
-#BeforeLimit AfterLimit
-#
-#Broadway
-#
-#Nearing the end
-#
-#End
-
-"Mapper Attachment ContentLength Limit":
-
- - do:
- index:
- index: test
- type: doc
- id: "withlimit"
- body:
- file:
- _indexed_chars: 20
- _content: "QmVnaW4NCg0KQmVmb3JlTGltaXQgQWZ0ZXJMaW1pdA0KDQpCcm9hZHdheQ0KDQpOZWFyaW5nIHRoZSBlbmQNCg0KRW5k"
-
- - do:
- index:
- index: test
- type: doc
- id: "nolimit"
- body:
- file:
- _indexed_chars: -1
- _content: "QmVnaW4NCg0KQmVmb3JlTGltaXQgQWZ0ZXJMaW1pdA0KDQpCcm9hZHdheQ0KDQpOZWFyaW5nIHRoZSBlbmQNCg0KRW5k"
-
- - do:
- indices.refresh: {}
-
- - do:
- search:
- index: test
- body:
- query:
- match:
- file.content: "BeforeLimit"
-
- - match: { hits.total: 2 }
-
- - do:
- search:
- index: test
- body:
- query:
- match:
- file.content: "AfterLimit"
-
- - match: { hits.total: 1 }
- - match: { hits.hits.0._id: "nolimit" }
-
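The "_indexed_chars" option exercised in the deleted test above is, under the hood, Tika's extracted-string limit. A hedged sketch of that underlying behaviour using the org.apache.tika.Tika facade (illustrative only, not repository code; the plugin's own accounting may differ slightly):

import java.io.ByteArrayInputStream;
import java.util.Base64;
import org.apache.tika.Tika;

public class IndexedCharsSketch {
    public static void main(String[] args) throws Exception {
        // Same fixture as the test: "Begin / BeforeLimit AfterLimit / ... / End".
        byte[] doc = Base64.getDecoder().decode(
            "QmVnaW4NCg0KQmVmb3JlTGltaXQgQWZ0ZXJMaW1pdA0KDQpCcm9hZHdheQ0KDQpOZWFyaW5nIHRoZSBlbmQNCg0KRW5k");
        Tika tika = new Tika();
        // Analogous to _indexed_chars: 20 -- extraction stops around "BeforeLimit",
        // so "AfterLimit" is never indexed; in the test, -1 means no limit. This
        // matches hits.total: 2 for "BeforeLimit" vs. 1 for "AfterLimit" above.
        tika.setMaxStringLength(20);
        System.out.println(tika.parseToString(new ByteArrayInputStream(doc)));
    }
}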
diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml
deleted file mode 100644
index 5aaa5a0796..0000000000
--- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-# Integration tests for Mapper Attachments plugin
-#
-
----
-# Encoded content with https://www.base64encode.org/
-#<html xmlns="http://www.w3.org/1999/xhtml">
-#<head>
-# <title>XHTML test document</title>
-# <meta name="Author" content="Tika Developers"/>
-# <meta http-equiv="refresh" content="5"/>
-#</head>
-#<body>
-#<p>
-# This document tests the ability of Apache Tika to extract content
-# from an <a href="http://www.w3.org/TR/xhtml1/">XHTML document</a>.
-#</p>
-#</body>
-#</html>
-"ContentType and Name":
-
- - do:
- indices.create:
- index: test
- body:
- mappings:
- doc:
- properties:
- "file":
- "type": "attachment"
- "fields":
- "content_type":
- "store": true
- "name":
- "store": true
-
- - do:
- index:
- index: test
- type: doc
- id: 1
- body:
- file:
- _content: "QmVnaW4NCg0KQmVmb3JlTGltaXQgQWZ0ZXJMaW1pdA0KDQpCcm9hZHdheQ0KDQpOZWFyaW5nIHRoZSBlbmQNCg0KRW5k"
- _content_type: "text/my-dummy-content-type"
- _name: "my-dummy-name-txt"
-
- - do:
- indices.refresh: {}
-
- - do:
- search:
- index: test
- body:
- stored_fields: [file.content_type,file.name]
-
- - match: { hits.total: 1 }
- - match: { hits.hits.0.fields: { file.content_type: ["text/my-dummy-content-type"], file.name: ["my-dummy-name-txt"] }}
-
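In the test above, "_content_type" and "_name" are supplied explicitly and stored verbatim. When "_content_type" is omitted, the attachment mapper falls back to Tika's automatic type detection (the plugin parses attachments with Tika). A small illustrative sketch of that detection, under the same Tika-facade assumption as above:

import org.apache.tika.Tika;

public class DetectSketch {
    public static void main(String[] args) {
        // Magic-byte based detection; absent an explicit override, this is the
        // kind of value that would populate the content_type sub-field
        // (e.g. "text/html" here).
        System.out.println(new Tika().detect("<html><body>World</body></html>".getBytes()));
    }
}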
diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml
deleted file mode 100644
index 658887a9ce..0000000000
--- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-# Integration tests for Mapper Attachments plugin
-#
-
-setup:
- - do:
- indices.create:
- index: test
- body:
- mappings:
- doc:
- properties:
- "file":
- "type": "attachment"
- "fields":
- "content" :
- "type": "text"
- "store" : true
- "term_vector": "with_positions_offsets"
-
----
-# Encoded content with https://www.base64encode.org/
-#<html xmlns="http://www.w3.org/1999/xhtml">
-#<head>
-# <title>XHTML test document</title>
-# <meta name="Author" content="Tika Developers"/>
-# <meta http-equiv="refresh" content="5"/>
-#</head>
-#<body>
-#<p>
-# This document tests the ability of Apache Tika to extract content
-# from an <a href="http://www.w3.org/TR/xhtml1/">XHTML document</a>.
-#</p>
-#</body>
-#</html>
-
-"Highlight content":
-
- - do:
- index:
- index: test
- type: doc
- id: 1
- body:
- file: "PGh0bWwgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGh0bWwiPg0KPGhlYWQ+DQogICAgPHRpdGxlPlhIVE1MIHRlc3QgZG9jdW1lbnQ8L3RpdGxlPg0KICAgIDxtZXRhIG5hbWU9IkF1dGhvciIgY29udGVudD0iVGlrYSBEZXZlbG9wZXJzIi8+DQogICAgPG1ldGEgaHR0cC1lcXVpdj0icmVmcmVzaCIgY29udGVudD0iNSIvPg0KPC9oZWFkPg0KPGJvZHk+DQo8cD4NCiAgICBUaGlzIGRvY3VtZW50IHRlc3RzIHRoZSBhYmlsaXR5IG9mIEFwYWNoZSBUaWthIHRvIGV4dHJhY3QgY29udGVudA0KICAgIGZyb20gYW4gPGEgaHJlZj0iaHR0cDovL3d3dy53My5vcmcvVFIveGh0bWwxLyI+WEhUTUwgZG9jdW1lbnQ8L2E+Lg0KPC9wPg0KPC9ib2R5Pg0KPC9odG1sPg=="
-
- - do:
- indices.refresh: {}
-
- - do:
- search:
- index: test
- body:
- query:
- match:
- file.content: "apache tika"
- stored_fields: []
- highlight:
- fields:
- file.content: {}
-
- - match: { hits.total: 1 }
- - match: { hits.hits.0.highlight: { file.content : [ "\n\n This document tests the ability of <em>Apache</em> <em>Tika</em> to extract content\n from an XHTML document.\n" ] }}
-
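For readers reproducing the highlight request above outside the YAML test harness, here is a hedged sketch using the 5.x-era low-level REST client (RestClient.builder / performRequest). The endpoint and body mirror the deleted test; the snippet itself is illustrative, not repository code:

import java.util.Collections;
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class HighlightSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // Same query/highlight body as the "Highlight content" test above.
            String body = "{"
                + "\"query\": { \"match\": { \"file.content\": \"apache tika\" } },"
                + "\"stored_fields\": [],"
                + "\"highlight\": { \"fields\": { \"file.content\": {} } }"
                + "}";
            Response rsp = client.performRequest("GET", "/test/_search",
                Collections.emptyMap(),
                new NStringEntity(body, ContentType.APPLICATION_JSON));
            System.out.println(EntityUtils.toString(rsp.getEntity()));
        }
    }
}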
diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml
deleted file mode 100644
index a0e1b600bf..0000000000
--- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-setup:
- - do:
- indices.create:
- index: test
- body:
- mappings:
- test:
- properties:
- file:
- type: attachment
- fields:
- content:
- store: true
- author:
- store: true
- date:
- store: true
- content_length:
- store: true
- content_type:
- store: true
-
----
-"Test mapper attachment processor with .doc file":
-
- - do:
- index:
- index: test
- type: test
- id: 1
- refresh: true
- body: { file: "0M8R4KGxGuEAAAAAAAAAAAAAAAAAAAAAPgADAP7/CQAGAAAAAAAAAAAAAAAEAAAAjAEAAAAAAAAAEAAAjgEAAAEAAAD+////AAAAAIgBAACJAQAAigEAAIsBAAD////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////spcEAg+kMBAAA8BK/AAAAAAABEQABAAEACAAAEwgAAA4AYmpiaoI4gjgAAAAAAAAAAAAAAAAAAAAAAAAMBBYANA4AAOBSAADgUgAAEwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD//w8AAAAAAAAAAAD//w8AAAAAAAAAAAD//w8AAAAAAAAAAAAAAAAAAAAAALcAAAAAAFAHAAAAAAAAUAcAAMcUAAAAAAAAxxQAAAAAAADHFAAAAAAAAMcUAAAAAAAAxxQAABQAAAAAAAAAAAAAAP////8AAAAA2xQAAAAAAADbFAAAAAAAANsUAAAAAAAA2xQAAAwAAADnFAAADAAAANsUAAAAAAAA3hUAADABAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAAVRUAAAIAAABXFQAAAAAAAFcVAAAAAAAAVxUAAAAAAABXFQAAAAAAAFcVAAAAAAAAVxUAACwAAAAOFwAAtgIAAMQZAABaAAAAgxUAABUAAAAAAAAAAAAAAAAAAAAAAAAAxxQAAAAAAADzFAAAAAAAAAAAAAAAAAAAAAAAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAIMVAAAAAAAAGRUAAAAAAADHFAAAAAAAAMcUAAAAAAAA8xQAAAAAAAAAAAAAAAAAAPMUAAAAAAAAmBUAABYAAAAZFQAAAAAAABkVAAAAAAAAGRUAAAAAAADzFAAAFgAAAMcUAAAAAAAA8xQAAAAAAADHFAAAAAAAAPMUAAAAAAAAVRUAAAAAAAAAAAAAAAAAABkVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8xQAAAAAAABVFQAAAAAAAAAAAAAAAAAAGRUAAAAAAAAAAAAAAAAAABkVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRUAAAAAAAAAAAAAAAAAAP////8AAAAAgI6XYKZ60QEAAAAAAAAAAP////8AAAAACRUAABAAAAAZFQAAAAAAAAAAAAAAAAAAQRUAABQAAACuFQAAMAAAAN4VAAAAAAAAGRUAAAAAAAAeGgAAAAAAABkVAAAAAAAAHhoAAAAAAAAZFQAAAAAAABkVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADHFAAAAAAAABkVAAAoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAAgxUAAAAAAACDFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAN4VAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAAAAAAAAAAAAAP////8AAAAA/////wAAAAD/////AAAAAAAAAAAAAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAB4aAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAABQBwAAPQwAAI0TAAA6AQAABwAMAQ8ADQEAAAwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFRlc3QgZWxhc3RpY3NlYXJjaA0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAABIIAAATCAAA/PgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYWaJVGuQAABhZo3wiGAAIACAAAEwgAAP0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAATIAMZBoATpwpBeqAB+wfC4gsMhBIbCJBSKwiQUjkIkFJJCJBSWwAAAXsMQCGLDEAgyQxAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALgYPABIAAQB8AQ8ACAADAAMAAwAAAAQACAAAAJgAAACeAAAAngAAAJ4AAACeAAAAngAAAJ4AAACeAAAAngAAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAAHYCAAB2AgAAdgIAAHYCAAB2AgAAdgIAAHYCAAB2AgAAdgIAADYGAAA2BgAANgYAADYGAAA2BgAANgYAAD4CAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAACoAAAANgYAADYGAAAWAAAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAC4AAAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAAaAEAAEgBAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAAHACAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAAMgYAABgAAADGAwAA1gMAAOYDAAD2AwAABgQAA
BYEAAAmBAAANgQAAEYEAABWBAAAZgQAAHYEAACGBAAAlgQAAMYDAADWAwAA5gMAAPYDAAAGBAAAFgQAADIGAAAoAgAA2AEAAOgBAAAmBAAANgQAAEYEAABWBAAAZgQAAHYEAACGBAAAlgQAAMYDAADWAwAA5gMAAPYDAAAGBAAAFgQAACYEAAA2BAAARgQAAFYEAABmBAAAdgQAAIYEAACWBAAAxgMAANYDAADmAwAA9gMAAAYEAAAWBAAAJgQAADYEAABGBAAAVgQAAGYEAAB2BAAAhgQAAJYEAADGAwAA1gMAAOYDAAD2AwAABgQAABYEAAAmBAAANgQAAEYEAABWBAAAZgQAAHYEAACGBAAAlgQAAMYDAADWAwAA5gMAAPYDAAAGBAAAFgQAACYEAAA2BAAARgQAAFYEAABmBAAAdgQAAIYEAACWBAAAxgMAANYDAADmAwAA9gMAAAYEAAAWBAAAJgQAADYEAABGBAAAVgQAAGYEAAB2BAAAhgQAAJYEAAA4AQAAWAEAAPgBAAAIAgAAGAIAAFYCAAB+AgAAkAIAAKACAACwAgAAwAIAANACAACAAgAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAAAgAAAAT0oDAFBKAwBRSgMAX0gBBG1IDARuSAwEc0gMBHRIDAQAAAAAQAAAYPH/AgBAAAwQAAAAAAAAAAAGAE4AbwByAG0AYQBsAAAAAgAAABgAQ0oYAF9IAQRhShgAbUgMBHNIDAR0SAkEAAAAAAAAAAAAAAAAAAAAAAAAOgBBIPL/oQA6AAwNAAAAAAAAEAARAFAAbwBsAGkAYwBlACAAcABhAHIAIABkAOkAZgBhAHUAdAAAAAAAVgBpAPP/swBWAAwNAAAAAAAAMAYOAFQAYQBiAGwAZQBhAHUAIABOAG8AcgBtAGEAbAAAABwAF/YDAAA01gYAAQoDbAA01gYAAQUDAABh9gMAAAIACwAAADIAayD0/8EAMgAADQAAAAAAADAGDABBAHUAYwB1AG4AZQAgAGwAaQBzAHQAZQAAAAIADAAAAAAAUEsDBBQABgAIAAAAIQCb6HBP/AAAABwCAAATAAAAW0NvbnRlbnRfVHlwZXNdLnhtbKyRy2rDMBBF94X+g9C22HK6KKXYzqKPXR+L9AMGeWyL2CMhTULy9x07LpQSAoVuBNLMvffMqFwfxkHtMSbnqdKrvNAKyfrGUVfpz81Ldq9VYqAGBk9Y6SMmva6vr8rNMWBSoqZU6Z45PBiTbI8jpNwHJKm0Po7Aco2dCWC30KG5LYo7Yz0xEmc8eei6fMIWdgOr54M8n0hErtXjqW+KqjSEMDgLLKBmqpqzuohDuiDcU/OLLlvIclHO5ql3Id0sCe+ymugaVB8Q+Q1G4TAsQ+LP8xVIRov5ZeYz0b5tncXG290o68hn48XsTwCr/4n+zjTz39ZfAAAA//8DAFBLAwQUAAYACAAAACEApdan58AAAAA2AQAACwAAAF9yZWxzLy5yZWxzhI/PasMwDIfvhb2D0X1R0sMYJXYvpZBDL6N9AOEof2giG9sb69tPxwYKuwiEpO/3qT3+rov54ZTnIBaaqgbD4kM/y2jhdj2/f4LJhaSnJQhbeHCGo3vbtV+8UNGjPM0xG6VItjCVEg+I2U+8Uq5CZNHJENJKRds0YiR/p5FxX9cfmJ4Z4DZM0/UWUtc3YK6PqMn/s8MwzJ5PwX+vLOVFBG43lExp5GKhqC/jU72QqGWq1B7Qtbj51v0BAAD//wMAUEsDBBQABgAIAAAAIQBreZYWgwAAAIoAAAAcAAAAdGhlbWUvdGhlbWUvdGhlbWVNYW5hZ2VyLnhtbAzMTQrDIBBA4X2hd5DZN2O7KEVissuuu/YAQ5waQceg0p/b1+XjgzfO3xTVm0sNWSycBw2KZc0uiLfwfCynG6jaSBzFLGzhxxXm6XgYybSNE99JyHNRfSPVkIWttd0g1rUr1SHvLN1euSRqPYtHV+jT9yniResrJgoCOP0BAAD//wMAUEsDBBQABgAIAAAAIQBtTVmryAYAAI4aAAAWAAAAdGhlbWUvdGhlbWUvdGhlbWUxLnhtbOxZ3YrbRhS+L/QdhO4d/0n+WeINtmxv2uwmIXbS5nJWHkuTHWmMZrwbEwJ9gkIhLb0p9K6F3gTaN+i7pLTpQ/TMSJZn7HH2hy2E0jUs8vg7Z7455+g7I83dey8T6pzjjBOW9tz6nZrr4DRkM5JGPffpdFzpuA4XKJ0hylLcc1eYu/cOP/3kLjoQMU6wA/YpP0A9NxZicVCt8hCGEb/DFjiF3+YsS5CAr1lUnWXoAvwmtNqo1VrVBJHUdVKUgNtp/PvP4OzRfE5C7B6uvY8oTJEKLgdCmk2kb1yYDJYZRkuFnZ3VJYKveEAz5xzRngsTzdjFFL8UrkMRF/BDz62pP7d6eLeKDgojKvbYanZj9VfYFQazs4aaM4tOy0k9z/da/dK/AlCxixu1R61Rq/SnACgMYaU5F92nP+gOhn6B1UD5pcX3sD1s1g285r+5w7nvy4+BV6Dcv7eDH48DiKKBV6Ac7+/gPa/dCDwDr0A5vrWDb9f6Q69t4BUopiQ920HX/FYzWK+2hMwZvW+Fd31v3G4UzjcoqIayuuQUc5aKfbWWoBcsGwNAAikSJHXEaoHnKIQyDhAlpxlxjkkUQ+EtUMo4DNcatXGtCf/lx1NXKiLoACPNWvICJnxnSPJxeJiRhei5n4NXV4M8XzpHTMQkLGZVTgyL+yiNdIv3P33z9w9fOX/9+uP7N9/mk27juY4f4jT6kqD0QxPAajdhePfd2z9+e/vu+6///OWNxX8/Q6c6fEoSzJ2H+MJ5whJYnGUF+DS7nsU0RkS36KcRRymSs1j8jyB+OvrhClFkwQ0gEjruWQYyYwMeLV8YhCdxthTE4vFBnBjAE8bogGXWKDyQc2lhni7TyD55ttRxTxA6t80doNTI82i5AH0lNpdBjA2ajylKBYpwioUjf2NnGFtW95wQI64nJMwYZ3PhPCfOABFrSKbk1KimjdF9kkBeVjaCkG8jNifPnAGjtlUP
8bmJhLsDUQv5KaZGGI/QUqDE5nKKEqoH/BiJ2EZysspCHTfiAjIdYcqc0QxzbrN5lMF6taQ/AImxp/2ErhITmQlyZvN5jBjTkUN2FsQoWdiwE5LGOvYzfgYlipzHTNjgJ8y8Q+R3yAOIx750PyPYSPflavAU1FWntCkQ+csys+TyCDOjficrOkdYSQ2Iv6HpCUkvFfgtaff/PWk/IWkYM8uKbkvU7a6NjFxTzvsZsd5P97dEfB9uW7oDls3Ix6/cQ7RMH2O4WXbb1//C/b9wu/954d53P9++XG8UGsRbbl3zzbrauid7d+5zQulErCg+5mrzzqEvzcYwKO3UYysun+QWMVzKOxkmMHBRhpSNkzHxBRHxJEYL2OHXXekk4oXriDsLxmHjr4atviWeLpMTNssfWOt1+XCaiwdHYjNe88txeNgQObrV3jyEle4V20g9LK8JSNvrkNAmM0k0LSTa60EZJPVoDkGzkFAruxUWXQuLjnS/TtUOC6BWZgU2Tg5st3qu74EJGMEzFaJ4JvOUp3qdXZXM28z0vmAaFQC7iHUFbDLdlVz3Lk+uLi+1K2TaIKGVm0lCRUb1MB6jGS6qU45ehcZ1c93dpNSgJ0Oh5oPS2tBodz7E4qa5BrttbaCprhQ0dS56bqvpQ8mEaNFz5/DgD5fJAmqHyw0vohG8PgtFlt/wN1GWRcbFEPE4D7gSnVwNEiJw5lCS9Fy5/DINNFUaorjVGyAIHy25LsjKx0YOkm4mGc/nOBR62rURGen8Kyh8rhXWX5X5zcHSki0h3ZN4duGc0mX2BEGJ+e26DOCMcHj/U8+jOSPwQrMUsk39bTWmQnb1N4qqhvJxRBcxKjqKLuY5XEl5SUd9K2OgfSvWDAHVQlI0wtNINlg9qEY3LbtGzmFv173cSEZOE81NzzRURXZNu4oZM6zbwFYsb9bkNVbrEIOm6R0+l+5tye2utW5rn1B2CQh4GT9L171CQ9CobSYzqEnGuzIsNbsYNXvHeoGXULtKk9BUv7V2uxW3skdYp4PBG3V+sNuuWhiar/eVKtLq6EM/nGCnL0A8hvAaeEkFV6mEo4cMwYZoovYkuWzALfJSFLcGXDnLjPTcVzW/7wUNP6jUOv6o4jW9WqXj95uVvu836yO/XhsOGq+hsYg4qfv5scsYXkTRVXH4osZ3DmCS9bu2OyFLqkydrFQVcXUAU28YBzD5yYszlQcsrkNAdF61GuNusztoVbrN/rjiDQedSjdoDSrDVtAejoeB3+mOX7vOuQJ7/WbgtUadSqseBBWvVZP0O91K22s0+l673xl5/dfFNgZWnstHEQsIr+J1+A8AAAD//wMAUEsDBBQABgAIAAAAIQAN0ZCftgAAABsBAAAnAAAAdGhlbWUvdGhlbWUvX3JlbHMvdGhlbWVNYW5hZ2VyLnhtbC5yZWxzhI9NCsIwFIT3gncIb2/TuhCRJt2I0K3UA4TkNQ02PyRR7O0NriwILodhvplpu5edyRNjMt4xaKoaCDrplXGawW247I5AUhZOidk7ZLBggo5vN+0VZ5FLKE0mJFIoLjGYcg4nSpOc0IpU+YCuOKOPVuQio6ZByLvQSPd1faDxmwF8xSS9YhB71QAZllCa/7P9OBqJZy8fFl3+UUFz2YUFKKLGzOAjm6pMBMpburrE3wAAAP//AwBQSwECLQAUAAYACAAAACEAm+hwT/wAAAAcAgAAEwAAAAAAAAAAAAAAAAAAAAAAW0NvbnRlbnRfVHlwZXNdLnhtbFBLAQItABQABgAIAAAAIQCl1qfnwAAAADYBAAALAAAAAAAAAAAAAAAAAC0BAABfcmVscy8ucmVsc1BLAQItABQABgAIAAAAIQBreZYWgwAAAIoAAAAcAAAAAAAAAAAAAAAAABYCAAB0aGVtZS90aGVtZS90aGVtZU1hbmFnZXIueG1sUEsBAi0AFAAGAAgAAAAhAG1NWavIBgAAjhoAABYAAAAAAAAAAAAAAAAA0wIAAHRoZW1lL3RoZW1lL3RoZW1lMS54bWxQSwECLQAUAAYACAAAACEADdGQn7YAAAAbAQAAJwAAAAAAAAAAAAAAAADPCQAAdGhlbWUvdGhlbWUvX3JlbHMvdGhlbWVNYW5hZ2VyLnhtbC5yZWxzUEsFBgAAAAAFAAUAXQEAAMoKAAAAADw/eG1sIHZlcnNpb249IjEuMCIgZW5jb2Rpbmc9IlVURi04IiBzdGFuZGFsb25lPSJ5ZXMiPz4NCjxhOmNsck1hcCB4bWxuczphPSJodHRwOi8vc2NoZW1hcy5vcGVueG1sZm9ybWF0cy5vcmcvZHJhd2luZ21sLzIwMDYvbWFpbiIgYmcxPSJsdDEiIHR4MT0iZGsxIiBiZzI9Imx0MiIgdHgyPSJkazIiIGFjY2VudDE9ImFjY2VudDEiIGFjY2VudDI9ImFjY2VudDIiIGFjY2VudDM9ImFjY2VudDMiIGFjY2VudDQ9ImFjY2VudDQiIGFjY2VudDU9ImFjY2VudDUiIGFjY2VudDY9ImFjY2VudDYiIGhsaW5rPSJobGluayIgZm9sSGxpbms9ImZvbEhsaW5rIi8+AAAAABMAAAAUAAAOAAAIAP////8ACAAAEwgAAAUAAAAACAAAEwgAAAYAAAAAAAAABQAAABIAAAAVAAAABwAEAAcAAAAAABIAAAAVAAAABAAHAAQAAAAEAAAACAAAAOUAAAAAAAAAAwAAAN8IhgCkF6oAlUa5AH419AAAAAAAEwAAABUAAAAAAAAAAQAAAP9AAIABABIAAAASAAAAAEBDewEAAQASAAAAAAAAABIAAAAAAAAAAAAAAAAAAAACEAAAAAAAAAATAAAAoAAAEABAAAD//wEAAAAHAFUAbgBrAG4AbwB3AG4A//8BAAgAAAAAAAAAAAAAAP//AQAAAAAA//8AAAIA//8AAAAA//8AAAIA//8AAAAABQAAAEcOkAEAAAICBgMFBAUCAwTvKgDgQXgAwAkAAAAAAAAA/wEAAAAAAABUAGkAbQBlAHMAIABOAGUAdwAgAFIAbwBtAGEAbgAAADUOkAECAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAgAAAAABTAHkAbQBiAG8AbAAAADMOkAEAAAILBgQCAgICAgT/KgDgQ3gAwAkAAAAAAAAA/wEAAAAAAABBAHIAaQBhAGwAAAA3DpABAAACDwUCAgIEAwIE/wIA4P+sAEABAAAAAAAAAJ8BAAAAAAAAQwBhAGwAaQBiAHIAaQAAAEESkAEBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDAGEAbQBiAHIAaQBhACAATQBhAHQAaAAAACAABADxCIgIAPDEAgAAqQEAAAAAWVJDh1lSQ4cAAAAAAgABAAAAAgAAABEAAAABAAEAAAAEAAOQAQAAAAIAAAARAAAAAQABAAAAAQAAAAAAAAAhAwDwEAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAClBsAHtAC0AIGBcjAAAAAAAAAAAAAAAAAAABIAAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAABAAAAA8BAACAD8/QEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACSFAAAAAACfH/DwAAJFAAABAnAAD///9/////f////3////9/////f////3////9/3wiGAAAEAAAyAAAAAAAAAAAAAAAAAAAAAAAAAAAAIQQAAAAAAAAAAAAAAAAAAAAAAAAQHAAABAAAAAAAAAAAAHgAAAB4AAAAAAAAAAAAAACgBQAAGkjOCAsAAAAAAAAA3AAAAAEAAAD//xIAAAAAAAAAAAAAAAAAAAAMAEQAYQB2AGkAZAAgAFAAaQBsAGEAdABvAAwARABhAHYAaQBkACAAUABpAGwAYQB0AG8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP7/AAADCgEAAAAAAAAAAAAAAAAAAAAAAAEAAADghZ/y+U9oEKuRCAArJ7PZMAAAANzSAgASAAAAAQAAAJgAAAACAAAAoAAAAAMAAACsAAAABAAAALgAAAAFAAAA0AAAAAYAAADcAAAABwAAAOgAAAAIAAAA/AAAAAkAAAAUAQAAEgAAACABAAAKAAAARAEAAAwAAABQAQAADQAAAFwBAAAOAAAAaAEAAA8AAABwAQAAEAAAAHgBAAATAAAAgAEAABEAAACIAQAAAgAAABAnAAAeAAAABAAAAAAAAAAeAAAABAAAAAAAAAAeAAAAEAAAAERhdmlkIFBpbGF0bwAAAAAeAAAABAAAAAAAAAAeAAAABAAAAAAAAAAeAAAADAAAAE5vcm1hbC5kb3RtAB4AAAAQAAAARGF2aWQgUGlsYXRvAAAAAB4AAAAEAAAAMgAAAB4AAAAcAAAATWljcm9zb2Z0IE1hY2ludG9zaCBXb3JkAAAAAEAAAAAARsMjAAAAAEAAAAAAFjZWpnrRAUAAAAAAFjZWpnrRAQMAAAABAAAAAwAAAAIAAAADAAAAEQAAAAMAAAAAAAAARwAAAEzRAgD/////DgAAAAEAAABsAAAAAAAAAAAAAAD/AAAAswAAAAAAAAAAAAAAZhkAANsRAAAgRU1GAAABAETRAgAIAAAAAQAAAAAAAAAAAAAAAAAAAOwEAACxAwAAQAEAAPAAAAAAAAAAAAAAAAAAAAAA4gQAgKkDABEAAAAMAAAACAAAAAoAAAAQAAAAAAAAAAAAAAAJAAAAEAAAAAABAAC0AAAADAAAABAAAAAAAAAAAAAAAAsAAAAQAAAAAAEAALQAAABRAAAAeNACAAAAAAAAAAAA/wAAALMAAAAAAAAAAAAAAAAAAAAAAAAAAAEAALQAAABQAAAAKAAAAHgAAAAA0AIAAAAAACAAzAAAAQAAtAAAACgAAAAAAQAAtAAAAAEAIAAAAAAAANACAAAAAAAAAAAAAAAAAAAAAAD/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////vr6+/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/76+vv////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////7vf//+rz7v/Yzc3/0NLY/+DX2f/N4PL/3tXI/8jV4v/Q0cX/1tDI/9ve2f/U0tX/0NLQ/83I0P/I2N7/4tnI/9LZ4v/v6tz/5eXl////9////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////83g9//e3M3/vrG3/8TCxv/Xwrz/vdfu/8W/rv/K1tX/x8bB/8LJxv/Oxb7/yMTE/8vCwv+3scH/zd7Z/9DNyP/BwcT/z97X/82xq/////v////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////u9/v/+/Lu////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////++vr7/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/vr6+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////8OAAAAFAAAAAAAAAAQAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD+/wAAAwoBAAAAAAAAAAAAAAAAAAAAAAABAAAAAtXN1ZwuGxCTlwgAKyz5rjAAAADUAAAACwAAAAEAAABgAAAABQAAAGgAAAAGAAAAcAAAABEAAAB4AAAAFwAAAIAAAAALAAAAiAAAABAAAACQAAAAEwAAAJgAAAAWAAAAoAAAAA0AAACoAAAADAAAALUAAAACAAAAECcAAAMAAAABAAAAAwAAAAEAAAADA
AAAEgAAAAMAAAAAAA8ACwAAAAAAAAALAAAAAAAAAAsAAAAAAAAACwAAAAAAAAAeEAAAAQAAAAEAAAAADBAAAAIAAAAeAAAABgAAAFRpdHJlAAMAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAP7///8JAAAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAAEwAAABQAAAAVAAAA/v///xcAAAAYAAAAGQAAABoAAAAbAAAAHAAAAB0AAAAeAAAAHwAAACAAAAAhAAAAIgAAACMAAAAkAAAAJQAAACYAAAAnAAAAKAAAACkAAAAqAAAAKwAAACwAAAAtAAAALgAAAC8AAAAwAAAAMQAAADIAAAAzAAAANAAAADUAAAA2AAAANwAAADgAAAA5AAAAOgAAADsAAAA8AAAAPQAAAD4AAAA/AAAAQAAAAEEAAABCAAAAQwAAAEQAAABFAAAARgAAAEcAAABIAAAASQAAAEoAAABLAAAATAAAAE0AAABOAAAATwAAAFAAAABRAAAAUgAAAFMAAABUAAAAVQAAAFYAAABXAAAAWAAAAFkAAABaAAAAWwAAAFwAAABdAAAAXgAAAF8AAABgAAAAYQAAAGIAAABjAAAAZAAAAGUAAABmAAAAZwAAAGgAAABpAAAAagAAAGsAAABsAAAAbQAAAG4AAABvAAAAcAAAAHEAAAByAAAAcwAAAHQAAAB1AAAAdgAAAHcAAAB4AAAAeQAAAHoAAAB7AAAAfAAAAH0AAAB+AAAAfwAAAIAAAACBAAAAggAAAIMAAACEAAAAhQAAAIYAAACHAAAAiAAAAIkAAACKAAAAiwAAAIwAAACNAAAAjgAAAI8AAACQAAAAkQAAAJIAAACTAAAAlAAAAJUAAACWAAAAlwAAAJgAAACZAAAAmgAAAJsAAACcAAAAnQAAAJ4AAACfAAAAoAAAAKEAAACiAAAAowAAAKQAAAClAAAApgAAAKcAAACoAAAAqQAAAKoAAACrAAAArAAAAK0AAACuAAAArwAAALAAAACxAAAAsgAAALMAAAC0AAAAtQAAALYAAAC3AAAAuAAAALkAAAC6AAAAuwAAALwAAAC9AAAAvgAAAL8AAADAAAAAwQAAAMIAAADDAAAAxAAAAMUAAADGAAAAxwAAAMgAAADJAAAAygAAAMsAAADMAAAAzQAAAM4AAADPAAAA0AAAANEAAADSAAAA0wAAANQAAADVAAAA1gAAANcAAADYAAAA2QAAANoAAADbAAAA3AAAAN0AAADeAAAA3wAAAOAAAADhAAAA4gAAAOMAAADkAAAA5QAAAOYAAADnAAAA6AAAAOkAAADqAAAA6wAAAOwAAADtAAAA7gAAAO8AAADwAAAA8QAAAPIAAADzAAAA9AAAAPUAAAD2AAAA9wAAAPgAAAD5AAAA+gAAAPsAAAD8AAAA/QAAAP4AAAD/AAAAAAEAAAEBAAACAQAAAwEAAAQBAAAFAQAABgEAAAcBAAAIAQAACQEAAAoBAAALAQAADAEAAA0BAAAOAQAADwEAABABAAARAQAAEgEAABMBAAAUAQAAFQEAABYBAAAXAQAAGAEAABkBAAAaAQAAGwEAABwBAAAdAQAAHgEAAB8BAAAgAQAAIQEAACIBAAAjAQAAJAEAACUBAAAmAQAAJwEAACgBAAApAQAAKgEAACsBAAAsAQAALQEAAC4BAAAvAQAAMAEAADEBAAAyAQAAMwEAADQBAAA1AQAANgEAADcBAAA4AQAAOQEAADoBAAA7AQAAPAEAAD0BAAA+AQAAPwEAAEABAABBAQAAQgEAAEMBAABEAQAARQEAAEYBAABHAQAASAEAAEkBAABKAQAASwEAAEwBAABNAQAATgEAAE8BAABQAQAAUQEAAFIBAABTAQAAVAEAAFUBAABWAQAAVwEAAFgBAABZAQAAWgEAAFsBAABcAQAAXQEAAF4BAABfAQAAYAE
AAGEBAABiAQAAYwEAAGQBAABlAQAAZgEAAGcBAABoAQAAaQEAAGoBAABrAQAAbAEAAG0BAABuAQAAbwEAAHABAABxAQAAcgEAAHMBAAB0AQAAdQEAAHYBAAB3AQAAeAEAAHkBAAB6AQAAewEAAHwBAAB9AQAAfgEAAH8BAAD+////gQEAAIIBAACDAQAAhAEAAIUBAACGAQAAhwEAAP7////9/////f////3////9////jQEAAP7////+/////v////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////9SAG8AbwB0ACAARQBuAHQAcgB5AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFgAFAf//////////AwAAAAYJAgAAAAAAwAAAAAAAAEYAAAAAAAAAAAAAAAAgFZlgpnrRAY8BAACAAAAAAAAAADEAVABhAGIAbABlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAIB/////wUAAAD/////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAB4aAAAAAAAAVwBvAHIAZABEAG8AYwB1AG0AZQBuAHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoAAgEBAAAA//////////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAFAFMAdQBtAG0AYQByAHkASQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKAACAQIAAAAEAAAA/////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABYAAAAM0wIAAAAAAAUARABvAGMAdQBtAGUAbgB0AFMAdQBtAG0AYQByAHkASQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAAAAAA4AAIB////////////////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAEAAAAQAAAAAAAAAQBDAG8AbQBwAE8AYgBqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABIAAgD///////////////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP///////////////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////////////////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////8BAP7/AwoAAP////8GCQIAAAAAAMAAAAAAAABGIAAAAERvY3VtZW50IE1pY3Jvc29mdCBXb3JkIDk3LTIwMDQACgAAAE1TV29yZERvYwAQAAAAV29yZC5Eb2N1bWVudC44APQ5snEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" }
-
- - do:
- search:
- index: test
- body:
- stored_fields: [file.content, file.author, file.date, file.content_length, file.content_type]
- - match: { hits.total: 1 }
- - match: { hits.hits.0.fields: {
- file.content: ["Test elasticsearch\n"],
- file.author: ["David Pilato"],
- file.date: ["2016-03-10T08:25:00Z"],
- file.content_length: ["205312"],
- file.content_type: ["application/msword"]
- }
- }
-
-
----
-"Test mapper attachment processor with .docx file":
-
- - do:
- index:
- index: test
- type: test
- id: 1
- refresh: true
- body: { file: "UEsDBBQABgAIAAAAIQBtiidLZgEAAFQFAAATAAgCW0NvbnRlbnRfVHlwZXNdLnhtbCCiBAIooAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC0lMtugzAQRfeV+g/I2wqcdFFVVUgWfSzbSE0/wLEH4tYv2c7r7ztAgqooAalJNkgwc+89A3hGk41WyQp8kNbkZJgNSAKGWyFNmZOv2Vv6SJIQmRFMWQM52UIgk/HtzWi2dRASVJuQk0WM7onSwBegWcisA4OVwnrNIt76kjrGf1gJ9H4weKDcmggmprHyIOPRCxRsqWLyusHHDQnKSfLc9FVROWHOKclZxDKtqvSozoMKHcKVEQd06Y4sQ2XdExbShbvTCd8OyoMEqavR6gJqPvB1eikgmTIf35nGBrq2XlBh+VKjKOse7gijLQrJodVXbs5bDiHgd9IqayuaSbNnP8kR4lZBuDxF49sfDzGi4BoAO+dehDXMP69G8ce8F6TA3BmbK7g8RmvdCxHx1EJzHZ7NUdt0RWLn1FsXcAv4f4y9P66VOsWBHfgou/+6NhGtz54Pqk0gQBzJpvVOHP8CAAD//wMAUEsDBBQABgAIAAAAIQDHwie8/wAAAN8CAAALAAgCX3JlbHMvLnJlbHMgogQCKKAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArJLNSgMxEIDvgu8Q5t7NtoqINNuLCL2JrA8wJtPd6OaHZKrt2xtF1IVlEexx/j6+SWa9ObhBvFLKNngFy6oGQV4HY32n4LG9W1yDyIze4BA8KThShk1zfrZ+oAG5DOXexiwKxWcFPXO8kTLrnhzmKkTypbILySGXMHUyon7BjuSqrq9k+s2AZsQUW6Mgbc0FiPYY6X9s6YjRIKPUIdEipjKd2JZdRIupI1Zggr4v6fzZURUyyGmhy78Lhd3OaroNeu/I85QXHZi8ITOvhDHOGS1PaTTu+JF5C8lI85Wes1md9sO437snj3aYeJfvWvUcqfsQkqOzbN4BAAD//wMAUEsDBBQABgAIAAAAIQATqj6H9gAAADEDAAAcAAgBd29yZC9fcmVscy9kb2N1bWVudC54bWwucmVscyCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKySy2rDMBBF94X+g5h9LTt9UELkbEoh29b9AEUeP6gsCc304b+vaEjr0GC68PJeMfeeQbPZfg5WvGOk3jsFRZaDQGd83btWwUv1eHUPgli7WlvvUMGIBNvy8mLzhFZzGqKuDyRSiiMFHXNYS0mmw0FT5gO69NL4OGhOMrYyaPOqW5SrPL+TcZoB5Umm2NUK4q6+BlGNAf+T7ZumN/jgzduAjs9UyA/cPyNzWo5SrI4tsoKJmaVEkOdBbpYEabzjSu8t/mL8WHMQt0tCcJqdAHzLg1nMMRRLMhCPFiefcdBz9atF6/9cw9E5IsiTQy+/AAAA//8DAFBLAwQUAAYACAAAACEA9WKOYGUCAAAOBwAAEQAAAHdvcmQvZG9jdW1lbnQueG1spFXfb9owEH6ftP8h8jtNwijQiFDR0qI+TKpK9zwZx0ksYp9lGyj763dOIGSbVtGSh9j367vv7mJncvsmq2DLjRWgUhJfRSTgikEmVJGSH6+PvTEJrKMqoxUonpI9t+R2+vXLZJdkwDaSKxcghLLJTrOUlM7pJAwtK7mk9koKZsBC7q4YyBDyXDAe7sBkYT+Ko3qnDTBuLea7p2pLLTnASTgPTVJ23PajaIyyUC3Gv4xAc4XGHIykDkVTYIRZb3QPMTV1YiUq4fYea9jCbFOyMSo5YPRaHj4mQQLJVlZHZ3jPtyF6WI4R5hySTcj80PKaXmh4hYRB2VLoU98+i4bG8gjybsGdYnc6Hlw29LmhO1xOgOfQz5ogWTXM30eMozMm4iHaiHMo/J
nzyKT78e0+15pOc+PrjwH0/wbQxWXDWRjY6BOauAztSa1bLH+VfADrMORuafYyMsuSajyBkiVPhQJDVxUywpEF2PXAf9ZkilfcCrK9XzWqB4mmhj5lKRmNhg/X9/GI1FrH31yjbR7UJnidZi8piaK7m8Hw5rpVzXlON5XzlvEwGs8f6yzGv9z0lVsX4JG2TjDLqWHlJPR6/65dVgBrf1ktHTUOIQVmjTy2ohLZ/1zAHWVrEnZ9H1TWeoY1lPZmy5l7Nv9nukS7185m8WjW9EIXy19oxdMRxzdRnbfE/XA8qJG9w3fqIR3gIY4HdX8SI4rSncQVOAfyJFc871hLTjOO1+EoGnsxB3Adsdi4WjykY1BZ1FpNGW98ajX+lRZG+KIrofizcAxZfhseq28Kr7fNcMPTj2z6GwAA//8DAFBLAwQUAAYACAAAACEAbU1ZqyEGAACOGgAAFQAAAHdvcmQvdGhlbWUvdGhlbWUxLnhtbOxZy47bNhTdF+g/ENo7lm3Jj0E8gS3bSZuZJMg4abOkJVpihhINkpoZIwjQLyhQIC26KdBdC3QToP2D/kuKNv2IUpRlkzbdQToOEBSxAYuPcy8P7yUPJev2nauUgAvEOKZZ32ncch2AspBGOIv7zpPppNZ1ABcwiyChGeo7S8SdO8effnIbHokEpQhI+4wfwb6TCLE4qtd5KJshv0UXKJN9c8pSKGSVxfWIwUvpNyX1puu26ynEmQMymEq30+T3n6Wzh/M5DpFzXHkfE/mTCV40hISdFb7RymSYMwRzhY3OG8WFL3lAGLiApO/IgSJ6OUVXwgEEciE7+o6rPk79+HZ9bUTEHlvNbqI+K7uVQXTeVHYsnq0NPc/32oO1fwUgYhc37ozb4/banwLAMJQzLbnoWH/YG478FVYDlUWL71Fn1GoYeM1/awc/8IuvgVegsujt4CeTYBNDDVQWfUtMOs3AM/AKVBbbO/iOOxh5HQOvQAnB2fkO2vXbraCa7Royp+SeFd7zvUmnuYJvUHVtdZX2mdi31lL4nLKJBKjkQoEzIJYLNIehxAWQ4BnD4ATHiVx4C5hRLpvdpjtxW/K3+HqqpCICjxDUrMumkO80FXwADxleiL7zufTqaJBnObhLRYLD1ai7FvdgFusWb3/65u8fvgJ//frj21ff2vFcx49QFn+JYfZvAwjd4M13r//47fWb77/+85dXFviAwZkOn+IUcfAAXYLHNJWTswyAZuzdLKYJxLrFIIs5zGBhY0GPZfx09IMlJNCCGyIzkk+ZlAob8G7+3CB8lrBcYAvwfpIawFNKyZAy65zuF2PpUciz2D44y3XcYwgvbGMHW3ke5wu55rHNZZAgg+YjIlMOY5QhAYo+eo6QxewZxkZcT3HIKKdzAZ5hMITYGpIpnhmraWN0D6cyL0sbQZlvIzanT8GQEpv7EbowkXJ3QGJziYgRxrswFzC1MoYp0ZEnUCQ2kmdLFhoB50JmOkaEgnGEOLfZPGRLg+59KTH2tJ+SZWoimcDnNuQJpNTY4PQ8SGC6sHLGWaJjP+PncolC8IgKKwlq7pCiLvMgxWNfup9iZKT7+r39RMqQfYEUPTmzbQlEzf24JHOIlPP6lqanOLtW4Lek3X9/0n6KszChds09iKjboTeR8wHD1v20LeL7cNvSHVAW4Q9fuUcwzx4huVks0I/C/VG4//fCvW8/H16uNwqtbuOrm3XlJt175z7HhJyJJUEnXGk7l9OLJrJRVZTR+kFhkcjiajgDFzOoyoBR8QUWyVkCF3KYhhoh5ivXMQcLyuXpoJqtvosOkqenNCpbG43q2VQaQLFpl6dL1S7PIlG2tjubh7C1e1WL1cNyRaCwfRcS2mAmiZaFRKdqvIaEmtlBWPQsLLqF+70s1GWVFbn/ACz+1/C9kpFcb5CgqMhTaV9l9+CZ3hdMc9pNy/R6BdfDZNogoS03k4S2DBMYoe3mA+e6t0mpQa8IxS6NTvd95LoQkS1tIJlZA5dyz7V86SaEi74zl/eFspgupD9e6CYkcdZ3QrEK9H9RlgXjYgR5UsJUVzn/FAvEAMGpXOt6Gki24dZodoo5fqDkeu6HFzl10ZOM5nMUij0tm6rsK51Ye28ILio0l6TPkugSzEjOHkMZKL/TKAIYYS7W0Yww0xb3JopbcrXaisZ/ZpstCskigasTRRfzEq7KazraPBTT7VmZ9dVkZnGRpBufutcbFR2aaO45QIpT064f7++Q11htdN9gVUr3ttb1Kq3bd0rc/EDQqG0GM6gVjC3UNq0mtQPeEGjDrZfmvjPi0KfB9qotDojqvlLVdl5O0NlzufJH8nY1J4IrquhKPiME1d/KpRKo1kpdrgTIGe47L1x/4AVNP6i5XX9c81qeW+v6g1Zt4PutxthvuKNh86UMikjShl+OPZHPM2S5evmi2ndewKTVbfatkKZ1qt6s1JWxegHTaBovYMo3L2Ba9DsAy8i8aDcnvVZv2K71WoNJzRsNu7Ve0B7WRu2gM5qMAr/bm7x0wIUCe4NW4LXH3Vq7EQQ1r+0W9Lu9WsdrNgdeZ9Ade4OXq1jLmVfXKryK1/E/AAAA//8DAFBLAwQKAAAAAAAAACEAvOgH/fQnAAD0JwAAFwAAAGRvY1Byb3BzL3RodW1ibmFpbC5qcGVn/9j/4AAQSkZJRgABAQAASABIAAD/4QCARXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUAAAABAAAARgEoAAMAAAABAAIAAIdpAAQAAAABAAAATgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAWmgAwAEAAAAAQAAAgAAAAAA/+0AOFBob3Rvc2hvcCAzLjAAOEJJTQQEAAAAAAAAOEJJTQQlAAAAAAAQ1B2M2Y8AsgTpgAmY7PhCfv/AABEIAgABaQMBEQACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2wBDAAEBAQEBAQEBAQEBA
QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/3QAEAC7/2gAMAwEAAhEDEQA/AP7Yfgx8GPg9N8HvhRLL8KPhrLLL8NfAskkkngTws8kkj+F9LZ3d200s7uxLMzHczEk5JNAHpX/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQB/Nd/wrT4c/8ARP8AwT/4Sug//INAH//Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAeDfEX4/+Hvhf8QfD3gbxN4W8Vx6ZrfgHxz8TNQ+JS33w9sPh34O8G/DOXRk8fav4uvfEHj3RPE1jD4Xt/Evhu/v307wrq0M1hrUU+ny3Z03Xk0oAxf8Ahrr9n+W68P2WneN7nXLrxJ4x0D4f2sHh7wj411x9L8aeI9T13RrPwx4qGmeHrr/hD9esNV8Na1YeJNG8Uto+q+Eriz2+KLLRxNbvKAXNd/am+C2h+MbHwIPFDa34kn8V6n4Q1Ox8OWU+sSaBf6R8P/iz8Q7+7v4YALrVNPt7T4K+PPCs58JW/ia/tfH2nf8ACHXun22rW2qRaWAZN7+2L+z3bRaLNY+N5/ECa/4p0XwRYyeG/DPinWLW38XeIPh9r/xR07w7rupW+jHSvC2qQ+B/Dt5r2vweJ77SB4Ns7zRZvGjeHoNc0qa6ANXSP2r/ANn7WUia2+JWhxg6X4r1q7mlNxLpWl6R4Asbe7+IWsaj4lsobvwrD4f+H91dQeHvGvimLXZ/C3hzxhJH4O1LWovE8sWkOANvf2s/2eNN06x1bUviZpmnadqdvey6fdahpXiSyS7v9N8cH4a6l4YgW50aGR/HenePlfwnf/D0L/wnNlrUctnc+HonikKgGaP2wfgC/wARfCHwzh8arNrHje68eaVoWsLYXkXhaTxJ8O/Gfw8+Hmu+GJtcuY4Ihqs/jj4m+HPB+l3MME+g3njFbzwS+tW/jP8As/QL8A+nKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/mXoA//1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQB5b8Sfgr8MPi/bahZ/EfwrB4otNV+HPxJ+Emo2t1qGsWltefDz4vW2gWfxF8OTw6ZqNlG0XiO18MaJDJf7Rq2mrZE6Pf6c11etcAHk2k/sUfs26Fq/hbXtJ8DavYaz4JstA07wrqNt8R/ifHc6LY+G/Gcfj/TbS0ZfGKqLZ/FKTX2p28ivBrFpqGs6LqsV3omu65p2oAEGtfsOfsueILvx9fal8MAbj4neIfEXivxo1j4y8f6Qmp+IvF3hL4ieCPE+rW0Ok+KLKDRbjXfDvxY+IUOprocenQXWpeJbrxBJEfEFtp+p2gBQ0H9gr9lfwv8AYz4f+HWraW2nW/hSx06S2+J3xZ86w0/wXbeLLLQ9MtLiTx01xBpf2Dx54107WdMSQWHiTT/FWvWXiKDVLbVLtJQB17+wd+y1qWhWfhXU/h9rOqeEdPvfH9/p3g7VPij8W9S8H6dc/FLRPEnh/wCIT6f4WvvHU2g2K+LNM8Y+K01aO10+FJbrxFrGoIqX17PcMAV9M/YB/ZM0jxR4H8Z2PwtuF8S/Di38NWvgzUZ/iF8T7saNF4R+Is3xY0DFjc+M5tNv5LP4gTvr8s2p2l5JfYTS79rnRkTT6AO9sf2UfgVp97a30PhPWJ5dP8S3vizR4NR+IPxI1XTvD+ra
h8V/AHxwuYvDek6n4vvNL8OaGPin8MPBPiu28LaJZ2Hhmxk0iXSbHSbfQNX1rS9SAPoqgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP5l6AP/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/X/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9D+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/S/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9P+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/V/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9b+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQA
UAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/X/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9D+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/S/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9P+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/V/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9b+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6V
QB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7Yfgx8Z/g9D8HvhRFL8V/hrFLF8NfAsckcnjvwskkcieF9LV0dG1IMjowKsrDcrAg4INAHpX/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQB/Nd/wsv4c/8ARQPBP/hVaD/8nUAf/9kAAFBLAwQUAAYACAAAACEAuN5y8JsDAACACQAAEQAAAHdvcmQvc2V0dGluZ3MueG1stFZLj9s2EL4X6H8wdK5Wj8iOV403sL1xs8E6WazcS2+URNnE8iEMKatO0f/eESWunGYRuA3ii8n55s1vxn7z9k/BJ0cKmim58KKr0JtQWaiSyf3C+3238efeRBsiS8KVpAvvRLX39ubnn960qabGoJqeoAupU1EsvIMxdRoEujhQQfSVqqlEsFIgiMEr7ANB4Kmp/UKJmhiWM87MKYjDcOYNbtTCa0CmgwtfsAKUVpXpTFJVVaygw5ezgEvi9ia3qmgElcZGDIByzEFJfWC1dt7E//WG4ME5OX6riKPgTq+NwgvKbRWUzxaXpNcZ1KAKqjU+kOAuQSbHwMlXjp5jX2HsoUTrCs2j0J7OM5/+NwfxvxxofkklPXTPciDQ82QoQxTp3V4qIDlHVmI5E8zIu0FaflZKTNq0plDg2yCnw9ALOgA7oqrMEEMR1jXl3JK84JSgwzbdAxFITyexNiWtSMPNjuSZUTUqHQnm/Tqc9/DhVB+otCT6A8fD4Uk87fHiQIAUhkJWkwKjrZU0oLjTK9VHZdY4CoAv1VtocqQPQI+Mtg+sMA3Q3pGdl/GU9bOHjiQR2IAv5mmrStoV1AC7/I06A5tU5HJ/MZDCXQGspLuu8Zk5cbrBmjL2mS5l+aHRhqFH25DvyOBbCWC7MfInpMruVNMNJV2P9A8KZh9ow1m9ZQAK7mSJlPphwVhVUcAADCm6RdYxUK3t83tKSlzR3xk3OKcRLvxSu8OjUsaphuF8Fs5vN32mHXoJslxGr5fJS8jqOpldW0oFz1FF2i3LB3CnjkIT0VusiciBkcm2W6dBp5HD04pJh+cU9wM9R7Imd6Dv94AWhPMNjp4D7AoQacl0fUsre+ZbAvvR76ABL0pxDXx49tWtFQq/gWrqHm2B1D01nEqUJIMlk+aeCSfXTZ45K4kb7QxqZPnpCLZPY3va1OAT2xG7J5YqVrcCf/M4UIlD1tGAbkld92zK99HC42x/MFFHAIO3En917SXfxwMWWyzuMXshRVcZag+HURY72ZneKyd7NcoSJ0tG2dTJpqNs5mSzToZLlALu4icktjt28kpxrlpavh/xr0RuSxcMXzw7iXxcrr/0GGcaJ63GPWwUOOxXi0VJWqriDsmKp/655+t3yTxa9vDU7m+zQx49YWsfabUimpYD5kynvelfm+4zj1f+MrqN/WQ2XfnzeP3OX23iZbReXs+m6/jvYQ7cX6ebfwAAAP//AwBQSwMEFAAGAAgAAAAhAPC8NQHcAQAA8QUAABIAAAB3b3JkL2ZvbnRUYWJsZS54bWy8k9tq4zAQhu8LfQej+8ay4vRg6pQ0bWBh6cXSfQBFkW2xOhhJiTdvvyPZcQMhbJallUHI/4x+jT40j0+/lUx23DphdImyCUYJ18xshK5L9PN9dXOPEuep3lBpNC/Rn
jv0NL++euyKymjvEtivXaFYiRrv2yJNHWu4om5iWq4hWBmrqIdfW6eK2l/b9oYZ1VIv1kIKv08JxrdosLGXuJiqEoy/GLZVXPu4P7VcgqPRrhGtO7h1l7h1xm5aaxh3Du6sZO+nqNCjTZafGCnBrHGm8hO4zFBRtILtGY4rJT8MZv9mQEYDxYpvtTaWriXAh0oSMEPzgX7SFZoqCCypFGsrYqCl2jieQWxHZYkwwSs8gzl8OZ6GGaUhkTXUOh5M+kTcyxVVQu4PKt160+ut8Kw5yDtqRaipDzlRQ2Dr1rhErxgGWa1Qr2QlykFYLEeFhKPiyAZlOio4KCz69BkPcReLPmMOnJn2AE5AvAvFXfLGu+SHUVSfAULwLYCYAY4AZvr5QMji9QjIEpS7+/xw/Q8gD38H0mO8HMgCypJnMDwDhnx4GfF1fD6G43cxYJh+BYahQZLvom782TYJzfFFbbIIFZPjVxHahOC75xMc8fL/2SbDws3/AAAA//8DAFBLAwQUAAYACAAAACEA4IvKVR8BAAARAgAAFAAAAHdvcmQvd2ViU2V0dGluZ3MueG1slNFRS8MwEAfwd8HvUPK+pRs6tKwbgkz2MgbVD5Cl1zWY5EIua7dv71nnRHyZbzku9+P+3Hx5dDbrIJJBX4rJOBcZeI218ftSvL2uRg8io6R8rSx6KMUJSCwXtzfzvuhhV0FK/JMyVjwVTpeiTSkUUpJuwSkaYwDPzQajU4nLuJdOxfdDGGl0QSWzM9akk5zm+UycmXiNgk1jNDyjPjjwaZiXESyL6Kk1gb61/hqtx1iHiBqIOI+zX55Txl+Yyd0fyBkdkbBJYw5z3migeHySDy9nf4D7/wHTC+B0sd57jGpn+QS8ScaYWPANlLXYbzcv8rOocYOpUh08UcUpLKyMhaETzBEsbSGuvW6zvuiULcXjTHBT/jrk4gMAAP//AwBQSwMEFAAGAAgAAAAhABZNBGBtAQAA7wIAABEACAFkb2NQcm9wcy9jb3JlLnhtbCCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJySUW+CMBSF35fsP5C+Q4suxhDAZDM+zcRkLlv21rVX7YS2aavIv18BxbH5tLd7e757uJw2nZ3KIjiCsULJDMURQQFIpriQ2wy9rhfhFAXWUclpoSRkqAaLZvn9Xcp0wpSBlVEajBNgA+8kbcJ0hnbO6QRjy3ZQUht5Qnpxo0xJnW/NFmvK9nQLeETIBJfgKKeO4sYw1L0jOlty1lvqgylaA84wFFCCdBbHUYyvrANT2psDrfKDLIWrNdxEL2JPn6zowaqqomrcon7/GL8vn1/aXw2FbLJigPKUs8QJV0Ce4mvpK3v4/ALmuuO+8TUzQJ0y+ZweBQ9WovBdC12EJvI91JUy3PrxQecxDpYZoZ2/yM58cODpglq39De7EcAf61/f+as3IwaOonkZedwSfZueY+52Ax74eJIuzIvyNn6arxcoH5F4EpJxGJM1mSajh4SQj2a9wfzVsDwv8G/Hi0GX0PCJ5t8AAAD//wMAUEsDBBQABgAIAAAAIQCBlv05MgsAAGRyAAAPAAAAd29yZC9zdHlsZXMueG1svJ3bctu6FYbvO9N34OiqvXB8jJ14trPHduLaUzvbO3Kaa4iEJNQgofLgQ5++IEhJkBdBcQGrvrIlan0A8eMHsEBS+u33l1RGTzwvhMrORvsf9kYRz2KViGx2Nvr5cLXzaRQVJcsSJlXGz0avvBj9/uWvf/nt+bQoXyUvIg3IitM0PhvNy3JxurtbxHOesuKDWvBMH5yqPGWlfpnPdlOWP1aLnVilC1aKiZCifN092Ns7HrWYfAhFTaci5l9VXKU8K038bs6lJqqsmItFsaQ9D6E9qzxZ5CrmRaFPOpUNL2UiW2H2jwAoFXGuCjUtP+iTaWtkUDp8f8/8l8o14CMOcLACpPHpzSxTOZtI3fq6JpGGjb7o5k9U/JVPWSXLon6Z3+fty/aV+XOlsrKInk9ZEQvxoEvWkFRo3vV5VoiRPsJZUZ4XgnUenNf/dB6Ji9J6+0IkYrRbl1j8Vx98YvJsdHC0fOeyrsHGe5Jls+V703zn6oddk7MRz3Z+juu3Jpp7NmL5zvi8DtxtT6z5a53uYvWq+dSbttFdQ3eUcdNf9VE+vVXxI0/GpT5wNtqri9Jv/ry5z4XKdZ88G33+3L455qm4FknCM+uD2Vwk/NecZz8Lnqzf//PK9Kv2jVhVmf7/8NOe0UsWybeXmC/qXqqPZqxuve91gKw/XYl14Sb8P0vYfttmXfFzzmqrRvtvEab6KMRBHVFYZ9vNrN6cu/kUqqDD9yro6L0K+vheBR2/V0En71XQp/cqyGD+nwWJLOEvjRFhMYC6jeNwI5rjMBua4/ASmuOwCprjcAKa4+joaI6jH6M5jm6K4JQqdvVCq7MfOnp7P3f7HOHH3T4l+HG3zwB+3O0Dvh93+/jux90+nPtxt4/eftztgzWe2yy1ohtts6wMdtlUqTJTJY9K/hJOY5lmmfyFhldPejwnOUkCTDOytRNxMC1m5vX2HmJM6j+fl3XKFalpNBWzKtdpb2jFefbEpU5AI5YkmkcIzHlZ5Y4W8enTOZ/ynGcxp+zYdFApMh5lVToh6JsLNiNj8Swhbr4lkWRQWHVoVpXz2iSCoFOnLM5VeNUUIxsfbkUR3lY1JLqopORErO80XcywwnMDgwlPDQwmPDMwmPDEwNKMqolaGlFLtTSiBmtpRO3W9E+qdmtpRO3W0ojaraWFt9uDKKUZ4u1Vx/7wvbtLqeod5+B6jMUsY3oBED7dtHum0T3L2Sxni3lU7x93Y+1zxpZzoZLX6IFiTluRqNb1potc6rMWWRXeoBs0KnOteET2WvGIDLbihVvsTi+T6wXaNU0+M64mZadpDWmQacdMVs2CNtxtrAzvYWsDXIm8ILNBN5agB3+vl7O1nBQj37qW4RVbs8Jt9XZUIq1eiySopVTxI80wfP264LlOyx6DSVdKSvXMEzriuMxV09dsyx8YSQZZ/lu6mLNCmFxpAzF8ql9eq47u2CL4hO4lExmNbt92UiZkRLeCuH64u40e1KJOM+uGoQFeqLJUKRmz3Qn82y8++TtNBc91Epy9Ep3tOdH2kIFdCoJJpiGphIikl5kiEyRzqOH9k79OFMsTGtp9zpvbQ0pORByzdNEsOgi8pcfFZz3+EKyGDO9fLBf1vhCVqR5IYNa2YVFN/s3j8KHu
u4pIdob+qEqz/2iWuiaaDhe+TNjAhS8RjJp6eqj7L8HJbuDCT3YDR3Wyl5IVhXBeQvXmUZ3ukkd9vuHJX8tTUuXTStI14BJI1oJLIFkTKlmlWUF5xoZHeMKGR32+hF3G8Ai25AzvH7lIyMQwMColDIxKBgOj0sDASAUIv0PHgoXfpmPBwu/VaWBESwALRtXPSKd/oqs8FoyqnxkYVT8zMKp+ZmBU/ezwa8SnU70IpptiLCRVn7OQdBNNVvJ0oXKWvxIhv0k+YwQbpA3tPlfT+rkBlTU3cRMg6z1qSbjYbnBUIv/iE7Kq1SzKehHsiDIplSLaW1tPOCZy8961bWHmmYvgKpjN9lv+xClW4xaM6DJAAwuXzYKFT1MWLHyasmDh05QFC5+mLFj4NGXBwu9fvpcs5nMlE547jNhXkWi8YHF7bQlcox60V38rZvMyGs9Xl6hszPHe1sjlLtNG2PYCuwaK44OesDueiCpdVhQ+AXR8ODzYGHojePmgVk/wevm7EflxYCQs83h75Dq124g8GRgJy/w0MNKMUhuRfYP4V5Y/dnaEk77+s9qYcHS+k75etAruLLavI60iu7rgSV8v2rBKdB7H9SUuqM4wz7jjh5nHHY9xkZuCsZObMthXbkSfwX7wJ1EvRzGDpilvdcvP2+IOzZQ6aOT8s1LNxaaNq6TDn0S80av9rOBRJ+dw+NXWjVHG3Y6Dhxs3YvC440YMHoDciEEjkTMcNSS5KYPHJjdi8CDlRqBHKzgj4EYrGI8brWC8z2gFKT6jVcAqwI0YvBxwI9BGhQi0UQNWCm4Eyqgg3MuokII2KkSgjQoRaKPCBRjOqDAeZ1QY72NUSPExKqSgjQoRaKNCBNqoEIE2KkSgjeq5tneGexkVUtBGhQi0USECbVSzXgwwKozHGRXG+xgVUnyMCiloo0IE2qgQgTYqRKCNChFoo0IEyqgg3MuokII2KkSgjQoRaKM2z8f6GxXG44wK432MCik+RoUUtFEhAm1UiEAbFSLQRoUItFEhAmVUEO5lVEhBGxUi0EaFCLRRzaWDAKPCeJxRYbyPUSHFx6iQgjYqRKCNChFoo0IE2qgQgTYqRKCMCsK9jAopaKNCBNqoENHXP9vr6q5nQ/bxu57Ox0yGX7pqK/XD/v4BG3U4HLWslZs1/AGaC6Ueo86nZQ9NvjEMIiZSKLNF7bgXxOaaC6Soq/V/XPY/lmbTA78prH2Ax1zoB/CjoZFgT+Wor8vbkSDJO+rr6XYkWHUe9Y2+diSYBo/6Bl3jy+WdVHo6AsF9w4wVvO8I7xutrXDYxH1jtBUIW7hvZLYCYQP3jcdW4MeoHpzfRn8c2E7Hq5uiAaGvO1qEEzehr1tCrZbDMTTGUNHchKHquQlDZXQTUHo6MXhh3Si0wm6Un9TQZlip/Y3qJmClhgQvqQHGX2qI8pYaovykhgMjVmpIwErtPzi7CV5SA4y/1BDlLTVE+UkNpzKs1JCAlRoSsFIHTshOjL/UEOUtNUT5SQ0Xd1ipIQErNSRgpYYEL6kBxl9qiPKWGqL8pAZZMlpqSMBKDQlYqSHBS2qA8Zcaorylhqg+qc0uyobUKIWtcNwizArETchWIG5wtgI9siUr2jNbsgie2RLUaqk5LluyRXMThqrnJgyV0U1A6enE4IV1o9AKu1F+UuOypS6p/Y3qJmClxmVLTqlx2VKv1LhsqVdqXLbklhqXLXVJjcuWuqT2H5zdBC+pcdlSr9S4bKlXaly25JYaly11SY3LlrqkxmVLXVIHTshOjL/UuGypV2pctuSWGpctdUmNy5a6pMZlS11S47Ilp9S4bKlXaly21Cs1LltyS43LlrqkxmVLXVLjsqUuqXHZklNqXLbUKzUuW+qV2pEt7T5v/GpYzTa/d6c/XL4ueP3F8dYDM0nzxbntRUDzwZtk9etedXBdk6j9xbP2bVPh9oJhU6IJhEXFc11W3H7ll6OoeyWFPm+WJ/pwCYp0fLOvqcL65JefbhtzfRG0+dzGBc/eGpd1Y/fU1ojBqt72aRRzVfFz2wW31VHXaCKbH8PT/9xkiQY8t7+w1tQ1eWENSh+/5FLesebTauH+qOTTsjm6v2cen31zfNJ8YaEzPjeDhBOwu1mZ5mX7w3eOFm9+wqC9eu1o9fMqrjIutRt4R5ub+ylCm3tdweV/xZf/AQAA//8DAFBLAwQUAAYACAAAACEAQP7QLGkBAAC3AgAAEAAIAWRvY1Byb3BzL2FwcC54bWwgogQBKKAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACcUk1LxTAQvAv+h9K7L32CH8i+iCjiQUV4Vc8h2bbBNAnJKr5/78ZqrXgzp92ZZHZmCZy/j656w5Rt8Jt6vWrqCr0Oxvp+Uz+21wendZVJeaNc8Lipd5jrc7m/Bw8pRExkMVcs4fOmHojimRBZDziqvGLaM9OFNCriNvUidJ3VeBX064iexGHTHAt8J/QGzUGcBetJ8eyN/itqgi7+8lO7i6wnocUxOkUo78tLtzKBRhAzCm0g5Vo7omwYnht4UD1muQYxFfAcksnyEMRUwOWgktLE+5PrExCLFi5idFYr4sXKO6tTyKGj6k5p6ynkoSoKIJa3gENsUb8mS7viY9nCrfWTk6lgZ0n1ScXhy97cwVYrh5ccX3bKZQTxAxSVl/wY23BVYn/xv8FFpmdLwzYqXQafLtMtCNgyioa9zuNmAG54/ckVeX7rezTfd/4SZV9P0z+U66NVw+dzO98YZ5w/iPwAAAD//wMAUEsBAi0AFAAGAAgAAAAhAG2KJ0tmAQAAVAUAABMAAAAAAAAAAAAAAAAAAAAAAFtDb250ZW50X1R5cGVzXS54bWxQSwECLQAUAAYACAAAACEAx8InvP8AAADfAgAACwAAAAAAAAAAAAAAAACfAwAAX3JlbHMvLnJlbHNQSwECLQAUAAYACAAAACEAE6o+h/YAAAAxAwAAHAAAAAAAAAAAAAAAAADPBgAAd29yZC9fcmVscy9kb2N1bWVudC54bWwucmVsc1BLAQItABQABgAIAAAAIQD1Yo5gZQIAAA4HAAARAAAAAAAAAAAAAAAAAAcJAAB3b3JkL2RvY3VtZW50LnhtbFBLAQItABQABgAIAAAAIQBtTVmrIQYAAI4aAAAVAAAAAAAAAAAAAAAAAJsLAAB3b3JkL3RoZW1lL3RoZW1lMS54bWxQSwECLQAKAAAAAAAAACEAvOgH/fQnAAD0JwAAFwA
AAAAAAAAAAAAAAADvEQAAZG9jUHJvcHMvdGh1bWJuYWlsLmpwZWdQSwECLQAUAAYACAAAACEAuN5y8JsDAACACQAAEQAAAAAAAAAAAAAAAAAYOgAAd29yZC9zZXR0aW5ncy54bWxQSwECLQAUAAYACAAAACEA8Lw1AdwBAADxBQAAEgAAAAAAAAAAAAAAAADiPQAAd29yZC9mb250VGFibGUueG1sUEsBAi0AFAAGAAgAAAAhAOCLylUfAQAAEQIAABQAAAAAAAAAAAAAAAAA7j8AAHdvcmQvd2ViU2V0dGluZ3MueG1sUEsBAi0AFAAGAAgAAAAhABZNBGBtAQAA7wIAABEAAAAAAAAAAAAAAAAAP0EAAGRvY1Byb3BzL2NvcmUueG1sUEsBAi0AFAAGAAgAAAAhAIGW/TkyCwAAZHIAAA8AAAAAAAAAAAAAAAAA40MAAHdvcmQvc3R5bGVzLnhtbFBLAQItABQABgAIAAAAIQBA/tAsaQEAALcCAAAQAAAAAAAAAAAAAAAAAEJPAABkb2NQcm9wcy9hcHAueG1sUEsFBgAAAAAMAAwABgMAAOFRAAAAAA==" }
-
- - do:
- search:
- index: test
- body:
- stored_fields: [file.content, file.author, file.date, file.content_length, file.content_type]
- - match: { hits.total: 1 }
- - match: { hits.hits.0.fields: {
- file.content: ["Test elasticsearch\n"],
- file.author: ["David Pilato"],
- file.date: ["2016-03-10T08:24:00Z"],
- file.content_length: ["21757"],
- file.content_type: ["application/vnd.openxmlformats-officedocument.wordprocessingml.document"]
- }
- }
-
diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java
index a31692a6ba..418d81fcf1 100644
--- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java
+++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java
@@ -162,9 +162,4 @@ public class Murmur3FieldMapper extends FieldMapper {
}
}
- @Override
- public boolean isGenerated() {
- return true;
- }
-
}
diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java
index 16865eb98b..e10fdb72ff 100644
--- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java
+++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java
@@ -22,7 +22,10 @@ package org.elasticsearch.index.mapper.murmur3;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
@@ -30,12 +33,19 @@ import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.indices.mapper.MapperRegistry;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;
import org.junit.Before;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
+import static org.hamcrest.Matchers.containsString;
+
public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {
MapperRegistry mapperRegistry;
@@ -49,7 +59,12 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {
Collections.singletonMap(Murmur3FieldMapper.CONTENT_TYPE, new Murmur3FieldMapper.TypeParser()),
Collections.emptyMap());
parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
- indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+ indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+ }
+
+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return pluginList(InternalSettingsPlugin.class);
}
public void testDefaults() throws Exception {
@@ -120,4 +135,27 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {
assertTrue(e.getMessage().contains("Setting [index] cannot be modified"));
}
}
+
+ public void testEmptyName() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("")
+ .field("type", "murmur3")
+ .endObject().endObject().endObject().endObject().string();
+
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(mapping))
+ );
+ assertThat(e.getMessage(), containsString("name cannot be empty string"));
+
+ // before 5.x
+ Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
+ Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
+ IndexService indexService2x = createIndex("test_old", oldIndexSettings);
+
+ DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(), indexService2x.getIndexAnalyzers(),
+ indexService2x.similarityService(), mapperRegistry, indexService2x::newQueryShardContext);
+
+ DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, defaultMapper.mappingSource().string());
+ }
}
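The added testEmptyName checks both sides of a 5.0 validation change: on a current index an empty field name is rejected, while an index created before 5.0 must keep parsing the same mapping for backwards compatibility. The pre-5.0 half hinges on seeding the index with an old creation version; a condensed sketch of that setup, using only calls that appear in the test above, run inside the same ESSingleNodeTestCase fixture:

    // Pick a random 2.x version and create an index that claims it was
    // created by that version; the mapper then applies the lenient legacy rules.
    Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
    Settings oldIndexSettings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion)
        .build();
    IndexService indexService2x = createIndex("test_old", oldIndexSettings);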
diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
index 9e3799a5d5..c4de761c8e 100644
--- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
+++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
@@ -108,7 +108,7 @@ public class SizeMappingIT extends ESIntegTestCase {
final String source = "{\"f\":10}";
indexRandom(true,
client().prepareIndex("test", "type", "1").setSource(source));
- GetResponse getResponse = client().prepareGet("test", "type", "1").setFields("_size").get();
+ GetResponse getResponse = client().prepareGet("test", "type", "1").setStoredFields("_size").get();
assertNotNull(getResponse.getField("_size"));
assertEquals(source.length(), getResponse.getField("_size").getValue());
}
diff --git a/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yaml b/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yaml
index 9e76645e26..f9a41e5215 100644
--- a/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yaml
+++ b/plugins/mapper-size/src/test/resources/rest-api-spec/test/mapper_size/10_basic.yaml
@@ -26,7 +26,7 @@
index: test
type: type1
id: 1
- fields: "_size"
+ stored_fields: "_size"
- match: { _size: 13 }
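Both mapper-size fixes above are the same rename: in 5.0 the get and search APIs stopped accepting "fields" and expose stored-field retrieval as stored_fields / setStoredFields. A minimal sketch of the Java side, assuming it runs inside an ESIntegTestCase where client() talks to the test cluster and the test index enables _size:

    // 5.0+: setStoredFields replaces the removed setFields(...)
    GetResponse getResponse = client()
        .prepareGet("test", "type", "1")
        .setStoredFields("_size")
        .get();
    assertNotNull(getResponse.getField("_size"));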
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java
index 8cb7b9085e..05bb911476 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java
@@ -20,12 +20,12 @@
package org.elasticsearch.cloud.azure.blobstore;
import com.microsoft.azure.storage.StorageException;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
import org.elasticsearch.common.io.Streams;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.repositories.RepositoryException;
@@ -43,7 +43,7 @@ import java.util.Map;
*/
public class AzureBlobContainer extends AbstractBlobContainer {
- protected final ESLogger logger = Loggers.getLogger(AzureBlobContainer.class);
+ protected final Logger logger = Loggers.getLogger(AzureBlobContainer.class);
protected final AzureBlobStore blobStore;
protected final String keyPath;
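This hunk is one instance of a mechanical migration that runs through the rest of this diff: the homegrown org.elasticsearch.common.logging.ESLogger gives way to the Log4j 2 org.apache.logging.log4j.Logger, while the Loggers factory keeps its shape. A minimal sketch of the new declaration (the class name here is illustrative):

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;

    public class ExampleContainer {
        // Same Loggers helper as before, but the field is now the Log4j 2 type.
        protected final Logger logger = Loggers.getLogger(ExampleContainer.class);
    }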
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java
index 4e5dfb3efd..cd201e7ff5 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java
@@ -27,6 +27,8 @@ import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import com.microsoft.azure.storage.blob.ListBlobItem;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
@@ -172,7 +174,7 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureS
logger.trace("creating container [{}]", container);
blobContainer.createIfNotExists();
} catch (IllegalArgumentException e) {
- logger.trace("fails creating container [{}]", e, container);
+ logger.trace((Supplier<?>) () -> new ParameterizedMessage("fails creating container [{}]", container), e);
throw new RepositoryException(container, e.getMessage());
}
}
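The logger.trace rewrite above is needed because the old ESLogger accepted the Throwable between the format string and its parameters, an overload Log4j 2 does not have. The replacement wraps the message in a lazily evaluated Supplier of a ParameterizedMessage and passes the exception through the dedicated Throwable parameter; a small sketch, with illustrative names:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    static void traceFailure(Logger logger, String container, Exception e) {
        // The lambda runs only if TRACE is enabled, so the message is built lazily;
        // the exception rides along in the dedicated Throwable argument.
        logger.trace(
            (Supplier<?>) () -> new ParameterizedMessage("fails creating container [{}]", container),
            e);
    }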
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java
index fcd7bf96b2..5b938fce18 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java
@@ -19,15 +19,8 @@
package org.elasticsearch.plugin.repository.azure;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
import org.elasticsearch.cloud.azure.storage.AzureStorageService;
import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@@ -36,6 +29,11 @@ import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.azure.AzureRepository;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
/**
* A plugin to add a repository type that writes to and from the Azure cloud storage service.
*/
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java
index 9ac15cae5a..808ae13e67 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java
@@ -28,11 +28,14 @@ import org.elasticsearch.cloud.azure.AbstractAzureRepositoryServiceIntegTestCase
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;
@ESIntegTestCase.ClusterScope(
scope = ESIntegTestCase.Scope.SUITE,
@@ -70,14 +73,20 @@ public class AzureSnapshotRestoreServiceIntegTests extends AbstractAzureReposito
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().totalHits(), equalTo(100L));
logger.info("--> snapshot");
- CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
- .setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
- assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
- assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
- equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
-
- assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(),
- equalTo(SnapshotState.SUCCESS));
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
+ .prepareCreateSnapshot("test-repo", "test-snap")
+ .setWaitForCompletion(true)
+ .setIndices("test-idx-*", "-test-idx-3")
+ .get();
+
+ final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
+ if (snapshotInfo.shardFailures() != null) {
+ for (SnapshotShardFailure shardFailure : snapshotInfo.shardFailures()) {
+ logger.warn("shard failure during snapshot: {}", shardFailure::toString);
+ }
+ }
+ assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
+ assertEquals(snapshotInfo.failedShards(), 0);
logger.info("--> delete some data");
for (int i = 0; i < 50; i++) {
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 544e040773..8cfb5043b6 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -362,4 +362,9 @@ thirdPartyAudit.excludes = [
// optional dependencies of slf4j-api
'org.slf4j.impl.StaticMDCBinder',
'org.slf4j.impl.StaticMarkerBinder',
+
+ 'org.apache.log4j.AppenderSkeleton',
+ 'org.apache.log4j.AsyncAppender',
+ 'org.apache.log4j.helpers.ISO8601DateFormat',
+ 'org.apache.log4j.spi.ThrowableInformation'
]
diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index a6610178ce..b136990867 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -48,6 +48,12 @@ dependencyLicenses {
mapping from: /jaxb-.*/, to: 'jaxb'
}
+bundlePlugin {
+ from('config/repository-s3') {
+ into 'config'
+ }
+}
+
test {
// this is needed for insecure plugins, remove if possible!
systemProperty 'tests.artifact', project.name
diff --git a/plugins/repository-s3/config/repository-s3/log4j2.properties b/plugins/repository-s3/config/repository-s3/log4j2.properties
new file mode 100644
index 0000000000..3fee57ce3e
--- /dev/null
+++ b/plugins/repository-s3/config/repository-s3/log4j2.properties
@@ -0,0 +1,8 @@
+logger.com_amazonaws.name = com.amazonaws
+logger.com_amazonaws.level = warn
+
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error
+
+logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
+logger.com_amazonaws_metrics_AwsSdkMetrics.level = error
diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java
index 5c02671e5e..c1c36031b5 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java
@@ -21,12 +21,12 @@ package org.elasticsearch.cloud.aws;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.SignerFactory;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
public class AwsSigner {
- private static final ESLogger logger = Loggers.getLogger(AwsSigner.class);
+ private static final Logger logger = Loggers.getLogger(AwsSigner.class);
private AwsSigner() {
diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java
index c4d8a63adc..a9091788f2 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java
@@ -29,11 +29,11 @@ import com.amazonaws.internal.StaticCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.s3.S3Repository;
@@ -85,7 +85,7 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements
return client;
}
- public static ClientConfiguration buildConfiguration(ESLogger logger, Settings settings, Protocol protocol, Integer maxRetries,
+ public static ClientConfiguration buildConfiguration(Logger logger, Settings settings, Protocol protocol, Integer maxRetries,
String endpoint, boolean useThrottleRetries) {
ClientConfiguration clientConfiguration = new ClientConfiguration();
// the response metadata cache is only there for diagnostics purposes,
@@ -122,7 +122,7 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements
return clientConfiguration;
}
- public static AWSCredentialsProvider buildCredentials(ESLogger logger, Settings settings, Settings repositorySettings) {
+ public static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings, Settings repositorySettings) {
AWSCredentialsProvider credentials;
String key = getValue(repositorySettings, settings,
S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING);
@@ -140,7 +140,7 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements
return credentials;
}
- protected static String findEndpoint(ESLogger logger, Settings settings, String endpoint, String region) {
+ protected static String findEndpoint(Logger logger, Settings settings, String endpoint, String region) {
if (Strings.isNullOrEmpty(endpoint)) {
logger.debug("no repository level endpoint has been defined. Trying to guess from repository region [{}]", region);
if (!region.isEmpty()) {
diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java
index dd278a9231..ef9b25b2d1 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/DefaultS3OutputStream.java
@@ -31,7 +31,7 @@ import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import com.amazonaws.util.Base64;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -64,7 +64,7 @@ import java.util.List;
public class DefaultS3OutputStream extends S3OutputStream {
private static final ByteSizeValue MULTIPART_MAX_SIZE = new ByteSizeValue(5, ByteSizeUnit.GB);
- private static final ESLogger logger = Loggers.getLogger("cloud.aws");
+ private static final Logger logger = Loggers.getLogger("cloud.aws");
/**
* Multipart Upload API data
*/
diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
index b5abb361be..b1471e417f 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
@@ -19,7 +19,6 @@
package org.elasticsearch.repositories.s3;
-import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import org.elasticsearch.cloud.aws.AwsS3Service;
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java
index 31682ee4de..37087db386 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java
@@ -28,8 +28,8 @@ import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@@ -49,7 +49,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble;
*/
public class TestAmazonS3 extends AmazonS3Wrapper {
- protected final ESLogger logger = Loggers.getLogger(getClass());
+ protected final Logger logger = Loggers.getLogger(getClass());
private double writeFailureRate = 0.0;
private double readFailureRate = 0.0;
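[Editor's note: TestAmazonS3 injects random read/write failures over a wrapped client via the failure-rate fields above. A minimal, hypothetical sketch of that fault-injection pattern — class and method names here are illustrative, not the actual test utility API:]

    import java.util.Random;

    class FlakyReads {
        private final Random random = new Random();
        private final double readFailureRate;

        FlakyReads(double readFailureRate) {
            this.readFailureRate = readFailureRate;
        }

        byte[] read(byte[] data) {
            // fail a configurable fraction of calls to exercise retry paths
            if (random.nextDouble() < readFailureRate) {
                throw new RuntimeException("simulated read failure");
            }
            return data;
        }
    }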
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java
index d9d15ce0b3..4d7f30ed9d 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java
@@ -24,6 +24,8 @@ import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
@@ -511,7 +513,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
client.deleteObjects(multiObjectDeleteRequest);
}
} catch (Exception ex) {
- logger.warn("Failed to delete S3 repository [{}] in [{}]", ex, bucketName, region);
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to delete S3 repository [{}] in [{}]", bucketName, region), ex);
}
}
}
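[Editor's note: the warn() change above uses the log4j2 idiom this migration adopts for logging an exception alongside a parameterized message: the message is built lazily through a Supplier and the Throwable goes in the dedicated last argument. A minimal self-contained sketch, assuming only the log4j2 API shown in the diff:]

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class WarnExample {
        private static final Logger logger = LogManager.getLogger(WarnExample.class);

        static void report(String bucketName, String region, Exception ex) {
            // the cast selects the warn(Supplier<?>, Throwable) overload, so the
            // message is only formatted if WARN is enabled
            logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                    "Failed to delete S3 repository [{}] in [{}]", bucketName, region), ex);
        }
    }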
diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle
index 18b7ae3c9e..6e1beef51c 100644
--- a/qa/backwards-5.0/build.gradle
+++ b/qa/backwards-5.0/build.gradle
@@ -18,6 +18,6 @@ integTest {
cluster {
numNodes = 2
numBwcNodes = 1
- bwcVersion = "5.0.0-alpha6-SNAPSHOT" // this is the same as the current version until we released the first RC
+ bwcVersion = "6.0.0-alpha1-SNAPSHOT" // this is the same as the current version until we released the first RC
}
}
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java
index 8bd2451da5..23da3a99df 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java
@@ -38,7 +38,7 @@ public class EvilElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.OK,
true,
output -> {},
- (foreground, pidFile, esSettings) -> {
+ (foreground, pidFile, quiet, esSettings) -> {
assertThat(esSettings.size(), equalTo(1));
assertThat(esSettings, hasEntry("path.home", value));
});
@@ -49,7 +49,7 @@ public class EvilElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.OK,
true,
output -> {},
- (foreground, pidFile, esSettings) -> {
+ (foreground, pidFile, quiet, esSettings) -> {
assertThat(esSettings.size(), equalTo(1));
assertThat(esSettings, hasEntry("path.home", commandLineValue));
},
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/cluster/metadata/EvilSystemPropertyTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/cluster/metadata/EvilSystemPropertyTests.java
new file mode 100644
index 0000000000..5e44fdbefa
--- /dev/null
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/cluster/metadata/EvilSystemPropertyTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+
+public class EvilSystemPropertyTests extends ESTestCase {
+
+ @SuppressForbidden(reason = "manipulates system properties for testing")
+ public void testMaxNumShards() {
+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
+ IndexMetaData.buildNumberOfShardsSetting()
+ .get(Settings.builder().put("index.number_of_shards", 1025).build()));
+ assertEquals("Failed to parse value [1025] for setting [index.number_of_shards] must be <= 1024", exception.getMessage());
+
+ Integer numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(Settings.builder().put("index.number_of_shards", 100).build());
+ assertEquals(100, numShards.intValue());
+ int limit = randomIntBetween(1, 10);
+ System.setProperty("es.index.max_number_of_shards", Integer.toString(limit));
+ try {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+ IndexMetaData.buildNumberOfShardsSetting()
+ .get(Settings.builder().put("index.number_of_shards", 11).build()));
+ assertEquals("Failed to parse value [11] for setting [index.number_of_shards] must be <= " + limit, e.getMessage());
+ } finally {
+ System.clearProperty("es.index.max_number_of_shards");
+ }
+ }
+}
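[Editor's note: a minimal sketch (pure JDK, illustrative names) of the behavior this test exercises — the shard-count ceiling defaults to 1024 but can be lowered via the es.index.max_number_of_shards system property; parseNumberOfShards is a hypothetical stand-in for IndexMetaData.buildNumberOfShardsSetting():]

    final class ShardLimit {
        static int parseNumberOfShards(int requested) {
            // ceiling defaults to 1024, overridable for testing via a system property
            final int limit = Integer.getInteger("es.index.max_number_of_shards", 1024);
            if (requested > limit) {
                throw new IllegalArgumentException(
                    "Failed to parse value [" + requested
                        + "] for setting [index.number_of_shards] must be <= " + limit);
            }
            return requested;
        }
    }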
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java
new file mode 100644
index 0000000000..7ee2120c36
--- /dev/null
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.elasticsearch.cli.UserException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.hasToString;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class EvilLoggerConfigurationTests extends ESTestCase {
+
+ @Override
+ public void tearDown() throws Exception {
+ LoggerContext context = (LoggerContext) LogManager.getContext(false);
+ Configurator.shutdown(context);
+ super.tearDown();
+ }
+
+ public void testResolveMultipleConfigs() throws Exception {
+ final Level level = ESLoggerFactory.getLogger("test").getLevel();
+ try {
+ final Path configDir = getDataPath("config");
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ final Environment environment = new Environment(settings);
+ LogConfigurator.configure(environment);
+
+ {
+ final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+ final Configuration config = ctx.getConfiguration();
+ final LoggerConfig loggerConfig = config.getLoggerConfig("test");
+ final Appender appender = loggerConfig.getAppenders().get("console");
+ assertThat(appender, notNullValue());
+ }
+
+ {
+ final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+ final Configuration config = ctx.getConfiguration();
+ final LoggerConfig loggerConfig = config.getLoggerConfig("second");
+ final Appender appender = loggerConfig.getAppenders().get("console2");
+ assertThat(appender, notNullValue());
+ }
+
+ {
+ final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+ final Configuration config = ctx.getConfiguration();
+ final LoggerConfig loggerConfig = config.getLoggerConfig("third");
+ final Appender appender = loggerConfig.getAppenders().get("console3");
+ assertThat(appender, notNullValue());
+ }
+ } finally {
+ Configurator.setLevel("test", level);
+ }
+ }
+
+ public void testDefaults() throws IOException, UserException {
+ final Path configDir = getDataPath("config");
+ final String level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR).toString();
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .put("logger.level", level)
+ .build();
+ final Environment environment = new Environment(settings);
+ LogConfigurator.configure(environment);
+
+ final String loggerName = "test";
+ final Logger logger = ESLoggerFactory.getLogger(loggerName);
+ assertThat(logger.getLevel().toString(), equalTo(level));
+ }
+
+ // tests that custom settings are not overwritten by settings in the config file
+ public void testResolveOrder() throws Exception {
+ final Path configDir = getDataPath("config");
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .put("logger.test_resolve_order", "TRACE")
+ .build();
+ final Environment environment = new Environment(settings);
+ LogConfigurator.configure(environment);
+
+ // args should overwrite whatever is in the config
+ final String loggerName = "test_resolve_order";
+ final Logger logger = ESLoggerFactory.getLogger(loggerName);
+ assertTrue(logger.isTraceEnabled());
+ }
+
+ public void testHierarchy() throws Exception {
+ final Path configDir = getDataPath("hierarchy");
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ final Environment environment = new Environment(settings);
+ LogConfigurator.configure(environment);
+
+ assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(Level.TRACE));
+ assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(Level.DEBUG));
+
+ final Level level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR);
+ Loggers.setLevel(ESLoggerFactory.getLogger("x"), level);
+
+ assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(level));
+ assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level));
+ }
+
+ public void testMissingConfigFile() {
+ final Path configDir = getDataPath("does_not_exist");
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ final Environment environment = new Environment(settings);
+ UserException e = expectThrows(UserException.class, () -> LogConfigurator.configure(environment));
+ assertThat(e, hasToString(containsString("no log4j2.properties found; tried")));
+ }
+
+}
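[Editor's note: testHierarchy above relies on log4j2's effective-level inheritance, plus the Elasticsearch helper Loggers.setLevel, which additionally applies the level to child loggers — that is why "x.y" follows "x" even though "x.y" has its own DEBUG config. A minimal sketch of the plain log4j2 inheritance half, outside the Elasticsearch helpers:]

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.config.Configurator;

    class HierarchyExample {
        public static void main(String[] args) {
            Configurator.setLevel("x", Level.DEBUG);
            // "x.y.z" has no configuration of its own, so it reports the level
            // of its nearest configured ancestor, "x"
            System.out.println(LogManager.getLogger("x.y.z").getLevel()); // DEBUG
        }
    }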
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java
new file mode 100644
index 0000000000..dd04d2de50
--- /dev/null
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.ConsoleAppender;
+import org.apache.logging.log4j.core.appender.CountingNoOpAppender;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.cli.UserException;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.hamcrest.RegexMatcher;
+
+import javax.management.MBeanServerPermission;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.AccessControlException;
+import java.security.Permission;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.startsWith;
+
+public class EvilLoggerTests extends ESTestCase {
+
+ @Override
+ public void tearDown() throws Exception {
+ LoggerContext context = (LoggerContext) LogManager.getContext(false);
+ Configurator.shutdown(context);
+ super.tearDown();
+ }
+
+ public void testLocationInfoTest() throws IOException, UserException {
+ setupLogging("location_info");
+
+ final Logger testLogger = ESLoggerFactory.getLogger("test");
+
+ testLogger.error("This is an error message");
+ testLogger.warn("This is a warning message");
+ testLogger.info("This is an info message");
+ testLogger.debug("This is a debug message");
+ testLogger.trace("This is a trace message");
+ final String path = System.getProperty("es.logs") + ".log";
+ final List<String> events = Files.readAllLines(PathUtils.get(path));
+ assertThat(events.size(), equalTo(5));
+ final String location = "org.elasticsearch.common.logging.EvilLoggerTests.testLocationInfoTest";
+ // all five messages should be logged with the caller's location
+ assertLogLine(events.get(0), Level.ERROR, location, "This is an error message");
+ assertLogLine(events.get(1), Level.WARN, location, "This is a warning message");
+ assertLogLine(events.get(2), Level.INFO, location, "This is an info message");
+ assertLogLine(events.get(3), Level.DEBUG, location, "This is a debug message");
+ assertLogLine(events.get(4), Level.TRACE, location, "This is a trace message");
+ }
+
+ public void testDeprecationLogger() throws IOException, UserException {
+ setupLogging("deprecation");
+
+ final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger("deprecation"));
+
+ deprecationLogger.deprecated("This is a deprecation message");
+ final String deprecationPath = System.getProperty("es.logs") + "_deprecation.log";
+ final List<String> deprecationEvents = Files.readAllLines(PathUtils.get(deprecationPath));
+ assertThat(deprecationEvents.size(), equalTo(1));
+ assertLogLine(
+ deprecationEvents.get(0),
+ Level.WARN,
+ "org.elasticsearch.common.logging.DeprecationLogger.deprecated",
+ "This is a deprecation message");
+ }
+
+ public void testFindAppender() throws IOException, UserException {
+ setupLogging("find_appender");
+
+ final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender");
+
+ final Appender testLoggerConsoleAppender = Loggers.findAppender(hasConsoleAppender, ConsoleAppender.class);
+ assertNotNull(testLoggerConsoleAppender);
+ assertThat(testLoggerConsoleAppender.getName(), equalTo("console"));
+ final Logger hasCountingNoOpAppender = ESLoggerFactory.getLogger("has_counting_no_op_appender");
+ assertNull(Loggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class));
+ final Appender countingNoOpAppender = Loggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class);
+ assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op"));
+ }
+
+ public void testPrefixLogger() throws IOException, IllegalAccessException, UserException {
+ setupLogging("prefix");
+
+ final String prefix = randomBoolean() ? null : randomAsciiOfLength(16);
+ final Logger logger = Loggers.getLogger("prefix", prefix);
+ logger.info("test");
+ logger.info("{}", "test");
+ final Exception e = new Exception("exception");
+ logger.info(new ParameterizedMessage("{}", "test"), e);
+
+ final String path = System.getProperty("es.logs") + ".log";
+ final List<String> events = Files.readAllLines(PathUtils.get(path));
+
+ final StringWriter sw = new StringWriter();
+ final PrintWriter pw = new PrintWriter(sw);
+ e.printStackTrace(pw);
+ final int stackTraceLength = sw.toString().split(System.getProperty("line.separator")).length;
+ final int expectedLogLines = 3;
+ assertThat(events.size(), equalTo(expectedLogLines + stackTraceLength));
+ for (int i = 0; i < expectedLogLines; i++) {
+ if (prefix == null) {
+ assertThat(events.get(i), startsWith("test"));
+ } else {
+ assertThat(events.get(i), startsWith("[" + prefix + "] test"));
+ }
+ }
+ }
+
+
+ public void testLog4jShutdownHack() {
+ final AtomicBoolean denied = new AtomicBoolean();
+ final SecurityManager sm = System.getSecurityManager();
+ try {
+ System.setSecurityManager(new SecurityManager() {
+ @Override
+ public void checkPermission(Permission perm) {
+ // just grant all permissions to Log4j, except we deny MBeanServerPermission
+ // "createMBeanServer" as this will trigger the Log4j bug
+ if (perm instanceof MBeanServerPermission && "createMBeanServer".equals(perm.getName())) {
+ // without the hack in place, Log4j will try to get an MBean server which we will deny
+ // with the hack in place, this permission should never be requested by Log4j
+ denied.set(true);
+ throw new AccessControlException("denied");
+ }
+ }
+
+ @Override
+ public void checkPropertyAccess(String key) {
+ /*
+ * grant access to all properties; this is so that Log4j can check if its usage
+ * of JMX is disabled or not by reading log4j2.disable.jmx but there are other
+ * properties that Log4j will try to read as well and it's simpler to just grant
+ * them all
+ */
+ }
+ });
+
+ // this will trigger the bug without the hack
+ LoggerContext context = (LoggerContext) LogManager.getContext(false);
+ Configurator.shutdown(context);
+
+ // Log4j should have never requested permissions to create an MBean server
+ assertFalse(denied.get());
+ } finally {
+ System.setSecurityManager(sm);
+ }
+ }
+
+ private void setupLogging(final String config) throws IOException, UserException {
+ final Path configDir = getDataPath(config);
+ // need to set custom path.conf so we can use a custom log4j2.properties file for the test
+ final Settings settings = Settings.builder()
+ .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ final Environment environment = new Environment(settings);
+ LogConfigurator.configure(environment);
+ }
+
+ private void assertLogLine(final String logLine, final Level level, final String location, final String message) {
+ final Matcher matcher = Pattern.compile("\\[(.*)\\]\\[(.*)\\(.*\\)\\] (.*)").matcher(logLine);
+ assertTrue(logLine, matcher.matches());
+ assertThat(matcher.group(1), equalTo(level.toString()));
+ assertThat(matcher.group(2), RegexMatcher.matches(location));
+ assertThat(matcher.group(3), RegexMatcher.matches(message));
+ }
+
+}
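[Editor's note: the assertLogLine pattern above parses lines produced by the file appender pattern "[%p][%l] %marker%m%n". An illustrative check with a made-up sample line, showing what each capture group holds:]

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class LogLineExample {
        public static void main(String[] args) {
            String sample = "[ERROR][org.elasticsearch.common.logging.EvilLoggerTests."
                + "testLocationInfoTest(EvilLoggerTests.java:63)] This is an error message";
            Matcher m = Pattern.compile("\\[(.*)\\]\\[(.*)\\(.*\\)\\] (.*)").matcher(sample);
            if (m.matches()) {
                System.out.println(m.group(1)); // the level: ERROR
                System.out.println(m.group(2)); // the caller location up to '('
                System.out.println(m.group(3)); // the message
            }
        }
    }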
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java
index af1f311dd2..6b930c1711 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java
@@ -291,6 +291,12 @@ public class InstallPluginCommandTests extends ESTestCase {
}
}
+ public void testMissingPluginId() throws IOException {
+ final Tuple<Path, Environment> env = createEnv(fs, temp);
+ final UserException e = expectThrows(UserException.class, () -> installPlugin(null, env.v1()));
+ assertTrue(e.getMessage(), e.getMessage().contains("plugin id is required"));
+ }
+
public void testSomethingWorks() throws Exception {
Tuple<Path, Environment> env = createEnv(fs, temp);
Path pluginDir = createPluginDir(temp);
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java
index 1422280165..ddac8f6620 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java
@@ -59,7 +59,7 @@ public class ListPluginsCommandTests extends ESTestCase {
static MockTerminal listPlugins(Path home) throws Exception {
return listPlugins(home, new String[0]);
}
-
+
static MockTerminal listPlugins(Path home, String[] args) throws Exception {
String[] argsAndHome = new String[args.length + 1];
System.arraycopy(args, 0, argsAndHome, 0, args.length);
@@ -69,16 +69,16 @@ public class ListPluginsCommandTests extends ESTestCase {
assertEquals(ExitCodes.OK, status);
return terminal;
}
-
+
static String buildMultiline(String... args){
return Arrays.asList(args).stream().collect(Collectors.joining("\n", "", "\n"));
}
-
- static void buildFakePlugin(Environment env, String description, String name, String classname) throws IOException {
+
+ static void buildFakePlugin(Environment env, String description, String name, String classname, String version) throws IOException {
PluginTestUtil.writeProperties(env.pluginsFile().resolve(name),
"description", description,
"name", name,
- "version", "1.0",
+ "version", version,
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"),
"classname", classname);
@@ -97,51 +97,51 @@ public class ListPluginsCommandTests extends ESTestCase {
}
public void testOnePlugin() throws Exception {
- buildFakePlugin(env, "fake desc", "fake", "org.fake");
+ buildFakePlugin(env, "fake desc", "fake", "org.fake", "1.0.0");
MockTerminal terminal = listPlugins(home);
- assertEquals(terminal.getOutput(), buildMultiline("fake"));
+ assertEquals(terminal.getOutput(), buildMultiline("fake@1.0.0"));
}
public void testTwoPlugins() throws Exception {
- buildFakePlugin(env, "fake desc", "fake1", "org.fake");
- buildFakePlugin(env, "fake desc 2", "fake2", "org.fake");
+ buildFakePlugin(env, "fake desc", "fake1", "org.fake", "1.2.3");
+ buildFakePlugin(env, "fake desc 2", "fake2", "org.fake", "6.5.4");
MockTerminal terminal = listPlugins(home);
- assertEquals(terminal.getOutput(), buildMultiline("fake1", "fake2"));
+ assertEquals(terminal.getOutput(), buildMultiline("fake1@1.2.3", "fake2@6.5.4"));
}
-
+
public void testPluginWithVerbose() throws Exception {
- buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake");
+ buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake", "1.0.0");
String[] params = { "-v" };
MockTerminal terminal = listPlugins(home, params);
- assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin",
- "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0", " * Classname: org.fake"));
+ assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin@1.0.0",
+ "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0.0", " * Classname: org.fake"));
}
-
+
public void testPluginWithVerboseMultiplePlugins() throws Exception {
- buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake");
- buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2");
+ buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.2.3");
+ buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "6.5.4");
String[] params = { "-v" };
MockTerminal terminal = listPlugins(home, params);
assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(),
- "fake_plugin1", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0",
- " * Classname: org.fake", "fake_plugin2", "- Plugin information:", "Name: fake_plugin2",
- "Description: fake desc 2", "Version: 1.0", " * Classname: org.fake2"));
+ "fake_plugin1@1.2.3", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.2.3",
+ " * Classname: org.fake", "fake_plugin2@6.5.4", "- Plugin information:", "Name: fake_plugin2",
+ "Description: fake desc 2", "Version: 6.5.4", " * Classname: org.fake2"));
}
-
+
public void testPluginWithoutVerboseMultiplePlugins() throws Exception {
- buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake");
- buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2");
+ buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.0.0");
+ buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "1.0.0");
MockTerminal terminal = listPlugins(home, new String[0]);
String output = terminal.getOutput();
- assertEquals(output, buildMultiline("fake_plugin1", "fake_plugin2"));
+ assertEquals(output, buildMultiline("fake_plugin1@1.0.0", "fake_plugin2@1.0.0"));
}
-
+
public void testPluginWithoutDescriptorFile() throws Exception{
Files.createDirectories(env.pluginsFile().resolve("fake1"));
NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> listPlugins(home));
assertEquals(e.getFile(), env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString());
}
-
+
public void testPluginWithWrongDescriptorFile() throws Exception{
PluginTestUtil.writeProperties(env.pluginsFile().resolve("fake1"),
"description", "fake desc");
@@ -149,5 +149,5 @@ public class ListPluginsCommandTests extends ESTestCase {
assertEquals(e.getMessage(), "Property [name] is missing in [" +
env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString() + "]");
}
-
+
}
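[Editor's note: the updated assertions expect each plugin to be listed as "name@version", one per line with a trailing newline — the same shape buildMultiline produces. A tiny sketch of that formatting, with made-up plugin names:]

    import java.util.Arrays;
    import java.util.stream.Collectors;

    class ListFormatExample {
        public static void main(String[] args) {
            String out = Arrays.asList("fake1@1.2.3", "fake2@6.5.4").stream()
                .collect(Collectors.joining("\n", "", "\n"));
            System.out.print(out); // fake1@1.2.3\nfake2@6.5.4\n
        }
    }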
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java
index e2910be64f..ab4f00492b 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java
@@ -19,6 +19,14 @@
package org.elasticsearch.plugins;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.cli.MockTerminal;
+import org.elasticsearch.cli.UserException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
@@ -26,13 +34,8 @@ import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
-import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.cli.UserException;
-import org.elasticsearch.cli.MockTerminal;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.test.ESTestCase;
-import org.junit.Before;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.not;
@LuceneTestCase.SuppressFileSystems("*")
public class RemovePluginCommandTests extends ESTestCase {
@@ -109,4 +112,26 @@ public class RemovePluginCommandTests extends ESTestCase {
assertRemoveCleaned(env);
}
+ public void testConfigDirPreserved() throws Exception {
+ Files.createDirectories(env.pluginsFile().resolve("fake"));
+ final Path configDir = env.configFile().resolve("fake");
+ Files.createDirectories(configDir);
+ Files.createFile(configDir.resolve("fake.yml"));
+ final MockTerminal terminal = removePlugin("fake", home);
+ assertTrue(Files.exists(env.configFile().resolve("fake")));
+ assertThat(terminal.getOutput(), containsString(expectedConfigDirPreservedMessage(configDir)));
+ assertRemoveCleaned(env);
+ }
+
+ public void testNoConfigDirPreserved() throws Exception {
+ Files.createDirectories(env.pluginsFile().resolve("fake"));
+ final Path configDir = env.configFile().resolve("fake");
+ final MockTerminal terminal = removePlugin("fake", home);
+ assertThat(terminal.getOutput(), not(containsString(expectedConfigDirPreservedMessage(configDir))));
+ }
+
+ private String expectedConfigDirPreservedMessage(final Path configDir) {
+ return "-> Preserving plugin config files [" + configDir + "] in case of upgrade, delete manually if not needed";
+ }
+
}
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
index 247caa4221..0171dfb99d 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
@@ -30,6 +30,7 @@ import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.InternalTestCluster;
@@ -38,6 +39,7 @@ import org.junit.BeforeClass;
import java.io.IOException;
import java.nio.file.Path;
+import java.util.Collections;
import static org.hamcrest.CoreMatchers.either;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -55,7 +57,7 @@ public class TribeUnitTests extends ESTestCase {
@BeforeClass
- public static void createTribes() {
+ public static void createTribes() throws NodeValidationException {
Settings baseSettings = Settings.builder()
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put("transport.type", "local")
@@ -70,14 +72,14 @@ public class TribeUnitTests extends ESTestCase {
.put("cluster.name", "tribe1")
.put("node.name", "tribe1_node")
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
- .build()).start();
+ .build(), Collections.emptyList()).start();
tribe2 = new TribeClientNode(
Settings.builder()
.put(baseSettings)
.put("cluster.name", "tribe2")
.put("node.name", "tribe2_node")
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
- .build()).start();
+ .build(), Collections.emptyList()).start();
}
@AfterClass
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties
new file mode 100644
index 0000000000..aca53f81c1
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties
@@ -0,0 +1,34 @@
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+appender.file.type = File
+appender.file.name = file
+appender.file.fileName = ${sys:es.logs}.log
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = [%p][%l] %marker%m%n
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.file.ref = file
+
+logger.test.name = test
+logger.test.level = trace
+logger.test.appenderRef.console.ref = console
+logger.test.appenderRef.file.ref = file
+logger.test.additivity = false
+
+appender.deprecation_file.type = File
+appender.deprecation_file.name = deprecation_file
+appender.deprecation_file.fileName = ${sys:es.logs}_deprecation.log
+appender.deprecation_file.layout.type = PatternLayout
+appender.deprecation_file.layout.pattern = [%p][%l] %marker%m%n
+
+logger.deprecation.name = deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_file.ref = deprecation_file
+logger.deprecation.additivity = false
+
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/second/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/second/log4j2.properties
new file mode 100644
index 0000000000..9d59e79c08
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/second/log4j2.properties
@@ -0,0 +1,8 @@
+appender.console2.type = Console
+appender.console2.name = console2
+appender.console2.layout.type = PatternLayout
+appender.console2.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+logger.second.name = second
+logger.second.level = debug
+logger.second.appenderRef.console2.ref = console2
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/third/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/third/log4j2.properties
new file mode 100644
index 0000000000..ed794cb7c3
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/third/log4j2.properties
@@ -0,0 +1,8 @@
+appender.console3.type = Console
+appender.console3.name = console3
+appender.console3.layout.type = PatternLayout
+appender.console3.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+logger.third.name = third
+logger.third.level = debug
+logger.third.appenderRef.console3.ref = console3
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties
new file mode 100644
index 0000000000..744e554ff9
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties
@@ -0,0 +1,27 @@
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+appender.file.type = File
+appender.file.name = file
+appender.file.fileName = ${sys:es.logs}.log
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = [%p][%l] %marker%m%n
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.file.ref = file
+
+appender.deprecation_file.type = File
+appender.deprecation_file.name = deprecation_file
+appender.deprecation_file.fileName = ${sys:es.logs}_deprecation.log
+appender.deprecation_file.layout.type = PatternLayout
+appender.deprecation_file.layout.pattern = [%p][%l] %marker%m%n
+
+logger.deprecation.name = deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_file.ref = deprecation_file
+logger.deprecation.additivity = false
diff --git a/plugins/mapper-attachments/licenses/bcmail-jdk15on-NOTICE.txt b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/does_not_exist/nothing_to_see_here
index e69de29bb2..e69de29bb2 100644
--- a/plugins/mapper-attachments/licenses/bcmail-jdk15on-NOTICE.txt
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/does_not_exist/nothing_to_see_here
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties
new file mode 100644
index 0000000000..80a2877162
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties
@@ -0,0 +1,17 @@
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+logger.has_console_appender.name = has_console_appender
+logger.has_console_appender.level = trace
+logger.has_console_appender.appenderRef.console.ref = console
+logger.has_console_appender.additivity = false
+
+appender.counting_no_op.type = CountingNoOp
+appender.counting_no_op.name = counting_no_op
+
+logger.has_counting_no_op_appender.name = has_counting_no_op_appender
+logger.has_counting_no_op_appender.appenderRef.counting_no_op.ref = counting_no_op
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties
new file mode 100644
index 0000000000..622f632f4b
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties
@@ -0,0 +1,20 @@
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %m%n
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+
+logger.x.name = x
+logger.x.level = trace
+logger.x.appenderRef.console.ref = console
+logger.x.additivity = false
+
+logger.x_y.name = x.y
+logger.x_y.level = debug
+logger.x_y.appenderRef.console.ref = console
+logger.x_y.additivity = false
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties
new file mode 100644
index 0000000000..d1a2c534b8
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties
@@ -0,0 +1,22 @@
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+appender.file.type = File
+appender.file.name = file
+appender.file.fileName = ${sys:es.logs}.log
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = [%p][%l] %marker%m%n
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.file.ref = file
+
+logger.test.name = test
+logger.test.level = trace
+logger.test.appenderRef.console.ref = console
+logger.test.appenderRef.file.ref = file
+logger.test.additivity = false
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/prefix/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/prefix/log4j2.properties
new file mode 100644
index 0000000000..1f18b38d91
--- /dev/null
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/prefix/log4j2.properties
@@ -0,0 +1,20 @@
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+appender.file.type = File
+appender.file.name = file
+appender.file.fileName = ${sys:es.logs}.log
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = %marker%m%n
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.file.ref = file
+
+logger.prefix.name = prefix
+logger.prefix.level = info
+logger.prefix.appenderRef.console.ref = console
+logger.prefix.appenderRef.file.ref = file
+logger.prefix.additivity = false
diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle
new file mode 100644
index 0000000000..f90763a12d
--- /dev/null
+++ b/qa/rolling-upgrade/build.gradle
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.test.RestIntegTestTask
+
+apply plugin: 'elasticsearch.standalone-test'
+
+task oldClusterTest(type: RestIntegTestTask) {
+ mustRunAfter(precommit)
+ cluster {
+ distribution = 'zip'
+ // TODO: Right now, this just forms a cluster with the current version of ES,
+ // because we don't support clusters with nodes on different alpha/beta releases of ES.
+ // When the GA is released, we should change the bwcVersion to 5.0.0 and uncomment
+ // numBwcNodes = 2
+ //bwcVersion = '5.0.0-alpha5' // TODO: either randomize, or make this settable with sysprop
+ //numBwcNodes = 2
+ numNodes = 2
+ clusterName = 'rolling-upgrade'
+ }
+ systemProperty 'tests.rest.suite', 'old_cluster'
+}
+
+task mixedClusterTest(type: RestIntegTestTask) {
+ dependsOn(oldClusterTest, 'oldClusterTest#node1.stop')
+ cluster {
+ distribution = 'zip'
+ clusterName = 'rolling-upgrade'
+ unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
+ dataDir = "${-> oldClusterTest.nodes[1].dataDir}"
+ }
+ systemProperty 'tests.rest.suite', 'mixed_cluster'
+ finalizedBy 'oldClusterTest#node0.stop'
+}
+
+task upgradedClusterTest(type: RestIntegTestTask) {
+ dependsOn(mixedClusterTest, 'oldClusterTest#node0.stop')
+ cluster {
+ distribution = 'zip'
+ clusterName = 'rolling-upgrade'
+ unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
+ dataDir = "${-> oldClusterTest.nodes[0].dataDir}"
+ }
+ systemProperty 'tests.rest.suite', 'upgraded_cluster'
+ // only need to stop the mixed-cluster test nodes here because we explicitly told that task not to stop its nodes upon completion
+ finalizedBy 'mixedClusterTest#stop'
+}
+
+task integTest {
+ dependsOn = [upgradedClusterTest]
+}
+
+test.enabled = false // no unit tests for rolling upgrades, only the rest integration test
+
+check.dependsOn(integTest)
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
new file mode 100644
index 0000000000..496a02e42d
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.upgrades;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;
+
+import java.io.IOException;
+
+@TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs
+public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+ @Override
+ protected boolean preserveIndicesUponCompletion() {
+ return true;
+ }
+
+ public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) {
+ super(testCandidate);
+ }
+
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
+ return createParameters(0, 1);
+ }
+}
+
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml
new file mode 100644
index 0000000000..a2b40cc54f
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml
@@ -0,0 +1,37 @@
+---
+"Index data and search on the mixed cluster":
+ - do:
+ cluster.health:
+ wait_for_status: green
+ wait_for_nodes: 2
+
+ - do:
+ search:
+ index: test_index
+
+ - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster
+
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v1_mixed", "f2": 5}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v2_mixed", "f2": 6}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v3_mixed", "f2": 7}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v4_mixed", "f2": 8}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v5_mixed", "f2": 9}'
+
+ - do:
+ indices.flush:
+ index: test_index
+
+ - do:
+ search:
+ index: test_index
+
+ - match: { hits.total: 10 } # 5 docs from old cluster, 5 docs from mixed cluster
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml
new file mode 100644
index 0000000000..f1f90cf9d2
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml
@@ -0,0 +1,34 @@
+---
+"Index data and search on the old cluster":
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v1_old", "f2": 0}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v2_old", "f2": 1}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v3_old", "f2": 2}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v4_old", "f2": 3}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v5_old", "f2": 4}'
+
+ - do:
+ indices.flush:
+ index: test_index
+
+ - do:
+ search:
+ index: test_index
+
+ - match: { hits.total: 5 }
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml
new file mode 100644
index 0000000000..03dcdc583d
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml
@@ -0,0 +1,37 @@
+---
+"Index data and search on the upgraded cluster":
+ - do:
+ cluster.health:
+ wait_for_status: green
+ wait_for_nodes: 2
+
+ - do:
+ search:
+ index: test_index
+
+ - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters
+
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v1_upgraded", "f2": 10}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v2_upgraded", "f2": 11}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v3_upgraded", "f2": 12}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v4_upgraded", "f2": 13}'
+ - '{"index": {"_index": "test_index", "_type": "test_type"}}'
+ - '{"f1": "v5_upgraded", "f2": 14}'
+
+ - do:
+ indices.flush:
+ index: test_index
+
+ - do:
+ search:
+ index: test_index
+
+ - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs
diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
index 645cc8382d..d36c2aa04d 100644
--- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
+++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
@@ -19,11 +19,11 @@
package org.elasticsearch.smoketest;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -32,6 +32,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.MockTcpTransportPlugin;
+import org.elasticsearch.transport.Netty3Plugin;
import org.elasticsearch.transport.Netty4Plugin;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.After;
@@ -71,7 +72,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase {
*/
public static final String TESTS_CLUSTER = "tests.cluster";
- protected static final ESLogger logger = ESLoggerFactory.getLogger(ESSmokeClientTestCase.class.getName());
+ protected static final Logger logger = ESLoggerFactory.getLogger(ESSmokeClientTestCase.class.getName());
private static final AtomicInteger counter = new AtomicInteger();
private static Client client;
@@ -91,7 +92,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase {
break;
case 1:
plugins = Collections.emptyList();
- builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME);
+ builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME);
break;
case 2:
plugins = Collections.emptyList();
diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java
index 4c5b38ca33..6380ed90e1 100644
--- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java
+++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.smoketest;
+import org.apache.lucene.util.Constants;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
@@ -27,10 +28,19 @@ import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.greaterThan;
public class SmokeTestClientIT extends ESSmokeClientTestCase {
+
+ // needed to prevent the test suite from failing for having no tests
+ // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778
+ public void testSoThatTestsDoNotFail() {
+
+ }
+
/**
* Check that we are connected to a cluster named "elasticsearch".
*/
public void testSimpleClient() {
+ // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778
+ assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9);
Client client = getClient();
// START SNIPPET: java-doc-admin-cluster-health
@@ -45,6 +55,8 @@ public class SmokeTestClientIT extends ESSmokeClientTestCase {
* Create an index and index some docs
*/
public void testPutDocument() {
+ // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778
+ assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9);
Client client = getClient();
// START SNIPPET: java-doc-index-doc-simple
@@ -63,5 +75,6 @@ public class SmokeTestClientIT extends ESSmokeClientTestCase {
assertThat(searchResponse.getHits().getTotalHits(), is(1L));
// END SNIPPET: java-doc-search-simple
}
+
}
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml
index 0987ae5191..8a80dec1c0 100644
--- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml
@@ -1,5 +1,5 @@
---
-"Test script processor with inline script":
+"Test script processor with inline script and params":
- do:
ingest.put_pipeline:
id: "my_pipeline"
@@ -10,7 +10,10 @@
{
"script" : {
"lang" : "painless",
- "inline": "ctx.bytes_total = ctx.bytes_in + ctx.bytes_out"
+ "inline": "ctx.bytes_total = (ctx.bytes_in + ctx.bytes_out) * params.factor",
+ "params": {
+ "factor": 10
+ }
}
}
]
@@ -32,7 +35,7 @@
id: 1
- match: { _source.bytes_in: 1234 }
- match: { _source.bytes_out: 4321 }
- - match: { _source.bytes_total: 5555 }
+ - match: { _source.bytes_total: 55550 }
---
"Test script processor with file script":
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index 303032299b..fb7ee1d8d3 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -37,7 +37,8 @@ List<String> availableBoxes = [
'sles-12',
'ubuntu-1204',
'ubuntu-1404',
- 'ubuntu-1504'
+ 'ubuntu-1504',
+ 'ubuntu-1604'
]
String vagrantBoxes = getProperties().get('vagrant.boxes', 'sample')
@@ -122,13 +123,24 @@ task stop {
Set<String> getVersions() {
Node xml
- new URL('http://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
+ new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
xml = new XmlParser().parse(s)
}
- return new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /2\.\d\.\d/ })
+
+ // List all N-1 releases from maven central
+ int major = Integer.parseInt(project.version.substring(0, project.version.indexOf('.'))) - 1
+ Set<String> versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /$major\.\d\.\d/ })
+ if (versions.isEmpty() == false) {
+ return versions;
+ }
+
+ // If no version is found, we run the tests with the current version
+ return Collections.singleton(project.version);
}
task updatePackagingTestUpgradeFromVersions {
+ group 'Verification'
+ description 'Update file containing options for the\n "starting" version in the "upgrade from" packaging tests.'
doLast {
Set<String> versions = getVersions()
new File(project.projectDir, 'versions').text = versions.join('\n') + '\n'
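The reworked getVersions() computes the previous major from project.version, keeps the matching N-1 releases from Maven Central's metadata, and falls back to the current version when none are published yet. A rough shell equivalent of that lookup (the major is hard-coded to 5 for illustration, since project.version here is 6.0.0-alpha1-SNAPSHOT):

    # Hedged sketch: list published N-1 releases from the Maven metadata.
    curl -s 'https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml' \
      | grep -o '<version>[^<]*</version>' \
      | sed -E 's/<\/?version>//g' \
      | grep -E '^5\.[0-9]+\.[0-9]+$' \
      | sort -u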
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats
index eca0c65090..7f9ce21e85 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats
+++ b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats
@@ -115,24 +115,6 @@ setup() {
export ES_JAVA_OPTS=$es_java_opts
}
-@test "[TAR] start Elasticsearch with unquoted JSON option" {
- local es_java_opts=$ES_JAVA_OPTS
- local es_jvm_options=$ES_JVM_OPTIONS
- local temp=`mktemp -d`
- touch "$temp/jvm.options"
- chown -R elasticsearch:elasticsearch "$temp"
- echo "-Delasticsearch.json.allow_unquoted_field_names=true" >> "$temp/jvm.options"
- export ES_JVM_OPTIONS="$temp/jvm.options"
- start_elasticsearch_service
- # unquoted field name
- curl -s -XPOST localhost:9200/i/d/1 -d'{foo: "bar"}'
- [ "$?" -eq 0 ]
- curl -s -XDELETE localhost:9200/i
- stop_elasticsearch_service
- export ES_JVM_OPTIONS=$es_jvm_options
- export ES_JAVA_OPTS=$es_java_opts
-}
-
@test "[TAR] remove tar" {
rm -rf "/tmp/elasticsearch"
}
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats
index 1f4cdeeeb3..52f3de34a9 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats
+++ b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats
@@ -127,13 +127,21 @@ setup() {
# see postrm file
assert_file_not_exist "/var/log/elasticsearch"
assert_file_not_exist "/usr/share/elasticsearch/plugins"
+ assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/var/run/elasticsearch"
+ # Those directories are removed by the package manager
+ assert_file_not_exist "/usr/share/elasticsearch/bin"
+ assert_file_not_exist "/usr/share/elasticsearch/lib"
+ assert_file_not_exist "/usr/share/elasticsearch/modules"
+ assert_file_not_exist "/usr/share/elasticsearch/modules/lang-painless"
+
# The configuration files are still here
assert_file_exist "/etc/elasticsearch"
+ assert_file_exist "/etc/elasticsearch/scripts"
assert_file_exist "/etc/elasticsearch/elasticsearch.yml"
assert_file_exist "/etc/elasticsearch/jvm.options"
- assert_file_exist "/etc/elasticsearch/logging.yml"
+ assert_file_exist "/etc/elasticsearch/log4j2.properties"
# The env file is still here
assert_file_exist "/etc/default/elasticsearch"
@@ -152,9 +160,10 @@ setup() {
@test "[DEB] verify package purge" {
# all remaining files are deleted by the purge
assert_file_not_exist "/etc/elasticsearch"
+ assert_file_not_exist "/etc/elasticsearch/scripts"
assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
assert_file_not_exist "/etc/elasticsearch/jvm.options"
- assert_file_not_exist "/etc/elasticsearch/logging.yml"
+ assert_file_not_exist "/etc/elasticsearch/log4j2.properties"
assert_file_not_exist "/etc/default/elasticsearch"
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats
index c47f24e6c6..50c6849e92 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats
+++ b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats
@@ -116,12 +116,20 @@ setup() {
# see postrm file
assert_file_not_exist "/var/log/elasticsearch"
assert_file_not_exist "/usr/share/elasticsearch/plugins"
+ assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/var/run/elasticsearch"
+ # Those directories are removed by the package manager
+ assert_file_not_exist "/usr/share/elasticsearch/bin"
+ assert_file_not_exist "/usr/share/elasticsearch/lib"
+ assert_file_not_exist "/usr/share/elasticsearch/modules"
+ assert_file_not_exist "/usr/share/elasticsearch/modules/lang-painless"
+
assert_file_not_exist "/etc/elasticsearch"
+ assert_file_not_exist "/etc/elasticsearch/scripts"
assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
assert_file_not_exist "/etc/elasticsearch/jvm.options"
- assert_file_not_exist "/etc/elasticsearch/logging.yml"
+ assert_file_not_exist "/etc/elasticsearch/log4j2.properties"
assert_file_not_exist "/etc/init.d/elasticsearch"
assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service"
@@ -140,7 +148,7 @@ setup() {
@test "[RPM] reremove package" {
echo "# ping" >> "/etc/elasticsearch/elasticsearch.yml"
echo "# ping" >> "/etc/elasticsearch/jvm.options"
- echo "# ping" >> "/etc/elasticsearch/logging.yml"
+ echo "# ping" >> "/etc/elasticsearch/log4j2.properties"
echo "# ping" >> "/etc/elasticsearch/scripts/script"
rpm -e 'elasticsearch'
}
@@ -157,14 +165,20 @@ setup() {
# see postrm file
assert_file_not_exist "/var/log/elasticsearch"
assert_file_not_exist "/usr/share/elasticsearch/plugins"
+ assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/var/run/elasticsearch"
+ assert_file_not_exist "/usr/share/elasticsearch/bin"
+ assert_file_not_exist "/usr/share/elasticsearch/lib"
+ assert_file_not_exist "/usr/share/elasticsearch/modules"
+ assert_file_not_exist "/usr/share/elasticsearch/modules/lang-painless"
+
assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
assert_file_exist "/etc/elasticsearch/elasticsearch.yml.rpmsave"
assert_file_not_exist "/etc/elasticsearch/jvm.options"
assert_file_exist "/etc/elasticsearch/jvm.options.rpmsave"
- assert_file_not_exist "/etc/elasticsearch/logging.yml"
- assert_file_exist "/etc/elasticsearch/logging.yml.rpmsave"
+ assert_file_not_exist "/etc/elasticsearch/log4j2.properties"
+ assert_file_exist "/etc/elasticsearch/log4j2.properties.rpmsave"
# older versions of rpm behave differently and preserve the
# directory but do not append the ".rpmsave" suffix
test -e "/etc/elasticsearch/scripts" || test -e "/etc/elasticsearch/scripts.rpmsave"
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats
index da7b6a180f..857a0ba397 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats
+++ b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats
@@ -68,9 +68,24 @@ setup() {
# starting Elasticsearch so we don't have to wait for elasticsearch to scan for
# them.
install_elasticsearch_test_scripts
+
+ # Capture the current epoch in millis
+ run date +%s
+ epoch="$output"
+
systemctl start elasticsearch.service
wait_for_elasticsearch_status
assert_file_exist "/var/run/elasticsearch/elasticsearch.pid"
+ assert_file_exist "/var/log/elasticsearch/elasticsearch.log"
+
+  # Converts the epoch back to a human readable format
+ run date --date=@$epoch "+%Y-%m-%d %H:%M:%S"
+ since="$output"
+
+ # Verifies that no new entries in journald have been added
+ # since the last start
+ result="$(journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" --output cat | wc -l)"
+ [ "$result" -eq "0" ]
}
@test "[SYSTEMD] start (running)" {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats b/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
index 48bf2aca4e..feca52c7bb 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
+++ b/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
@@ -37,6 +37,11 @@ load os_package
# Cleans everything for the 1st execution
setup() {
skip_not_dpkg_or_rpm
+
+ sameVersion="false"
+ if [ "$(cat upgrade_from_version)" == "$(cat version)" ]; then
+ sameVersion="true"
+ fi
}
@test "[UPGRADE] install old version" {
@@ -49,11 +54,7 @@ setup() {
}
@test "[UPGRADE] check elasticsearch version is old version" {
- curl -s localhost:9200 | grep \"number\"\ :\ \"$(cat upgrade_from_version)\" || {
- echo "Installed an unexpected version:"
- curl -s localhost:9200
- false
- }
+ check_elasticsearch_version "$(cat upgrade_from_version)"
}
@test "[UPGRADE] index some documents into a few indexes" {
@@ -79,7 +80,11 @@ setup() {
}
@test "[UPGRADE] install version under test" {
- install_package -u
+ if [ "$sameVersion" == "true" ]; then
+ install_package -f
+ else
+ install_package -u
+ fi
}
@test "[UPGRADE] start version under test" {
@@ -88,12 +93,7 @@ setup() {
}
@test "[UPGRADE] check elasticsearch version is version under test" {
- local versionToCheck=$(cat version | sed -e 's/-SNAPSHOT//')
- curl -s localhost:9200 | grep \"number\"\ :\ \"$versionToCheck\" || {
- echo "Installed an unexpected version:"
- curl -s localhost:9200
- false
- }
+ check_elasticsearch_version "$(cat version)"
}
@test "[UPGRADE] verify that the documents are there after restart" {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.mustache b/qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.mustache
index a72544f782..ee27df25a9 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.mustache
+++ b/qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.mustache
@@ -2,7 +2,8 @@
"query": {
"script": {
"script": {
- "file": "is_guide"
+ "file": "is_guide",
+ "lang": "groovy"
}
}
}
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
index b44e5885ff..b5a494fd08 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
@@ -217,6 +217,10 @@ fi
install_and_check_plugin discovery ec2 aws-java-sdk-core-*.jar
}
+@test "[$GROUP] install discovery-file plugin" {
+ install_and_check_plugin discovery file
+}
+
@test "[$GROUP] install ingest-attachment plugin" {
# we specify the version on the poi-3.15-beta1.jar so that the test does
# not spuriously pass if the jar is missing but the other poi jars
@@ -263,10 +267,6 @@ fi
install_and_check_plugin lang python jython-standalone-*.jar
}
-@test "[$GROUP] install mapper-attachments plugin" {
- install_and_check_plugin mapper attachments
-}
-
@test "[$GROUP] install murmur3 mapper plugin" {
install_and_check_plugin mapper murmur3
}
@@ -300,7 +300,7 @@ fi
}
@test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" {
- "$ESHOME/bin/elasticsearch-plugin" list > /tmp/installed
+ "$ESHOME/bin/elasticsearch-plugin" list | cut -d'@' -f1 > /tmp/installed
compare_plugins_list "/tmp/installed" "'plugins list'"
}
@@ -353,6 +353,10 @@ fi
remove_plugin discovery-ec2
}
+@test "[$GROUP] remove discovery-file plugin" {
+ remove_plugin discovery-file
+}
+
@test "[$GROUP] remove ingest-attachment plugin" {
remove_plugin ingest-attachment
}
@@ -373,10 +377,6 @@ fi
remove_plugin lang-python
}
-@test "[$GROUP] remove mapper-attachments plugin" {
- remove_plugin mapper-attachments
-}
-
@test "[$GROUP] remove murmur3 mapper plugin" {
remove_plugin mapper-murmur3
}
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash
index ee6e491d16..1060aa7884 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash
@@ -44,12 +44,16 @@ export_elasticsearch_paths() {
install_package() {
local version=$(cat version)
local rpmCommand='-i'
- while getopts ":uv:" opt; do
+ while getopts ":fuv:" opt; do
case $opt in
u)
rpmCommand='-U'
dpkgCommand='--force-confnew'
;;
+ f)
+ rpmCommand='-U --force'
+ dpkgCommand='--force-conflicts'
+ ;;
v)
version=$OPTARG
;;
@@ -79,7 +83,7 @@ verify_package_installation() {
assert_file "$ESHOME/lib" d root root 755
assert_file "$ESCONFIG" d root elasticsearch 750
assert_file "$ESCONFIG/elasticsearch.yml" f root elasticsearch 750
- assert_file "$ESCONFIG/logging.yml" f root elasticsearch 750
+ assert_file "$ESCONFIG/log4j2.properties" f root elasticsearch 750
assert_file "$ESSCRIPTS" d root elasticsearch 750
assert_file "$ESDATA" d elasticsearch elasticsearch 755
assert_file "$ESLOG" d elasticsearch elasticsearch 755
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash
index 37daf0ae5b..137f87045d 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash
@@ -452,6 +452,22 @@ wait_for_elasticsearch_status() {
}
}
+# Checks the current elasticsearch version using the Info REST endpoint
+# $1 - expected version
+check_elasticsearch_version() {
+ local version=$1
+ local versionToCheck=$(echo $version | sed -e 's/-SNAPSHOT//')
+
+ run curl -s localhost:9200
+ [ "$status" -eq 0 ]
+
+ echo $output | grep \"number\"\ :\ \"$versionToCheck\" || {
+ echo "Installed an unexpected version:"
+ curl -s localhost:9200
+ false
+ }
+}
+
install_elasticsearch_test_scripts() {
install_script is_guide.groovy
install_script is_guide.mustache
@@ -476,7 +492,8 @@ run_elasticsearch_tests() {
"query": {
"script": {
"script": {
- "file": "is_guide"
+ "file": "is_guide",
+ "lang": "groovy"
}
}
}
@@ -499,7 +516,7 @@ move_config() {
mv "$oldConfig"/* "$ESCONFIG"
chown -R elasticsearch:elasticsearch "$ESCONFIG"
assert_file_exist "$ESCONFIG/elasticsearch.yml"
- assert_file_exist "$ESCONFIG/logging.yml"
+ assert_file_exist "$ESCONFIG/log4j2.properties"
}
# Copies a script into the Elasticsearch install.
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash
index 798ec6c299..0ea86ddcc6 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash
@@ -86,7 +86,7 @@ verify_archive_installation() {
assert_file "$ESHOME/bin/elasticsearch-plugin" f
assert_file "$ESCONFIG" d
assert_file "$ESCONFIG/elasticsearch.yml" f
- assert_file "$ESCONFIG/logging.yml" f
+ assert_file "$ESCONFIG/log4j2.properties" f
assert_file "$ESHOME/lib" d
assert_file "$ESHOME/NOTICE.txt" f
assert_file "$ESHOME/LICENSE.txt" f
diff --git a/qa/vagrant/versions b/qa/vagrant/versions
index dc73cb6e2a..654a95a3a2 100644
--- a/qa/vagrant/versions
+++ b/qa/vagrant/versions
@@ -1,15 +1 @@
-2.0.0
-2.0.1
-2.0.2
-2.1.0
-2.1.1
-2.1.2
-2.2.0
-2.2.1
-2.2.2
-2.3.0
-2.3.1
-2.3.2
-2.3.3
-2.3.4
-2.3.5
+6.0.0-alpha1-SNAPSHOT
diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle
index 2a7de2c726..cd49b9f192 100644
--- a/rest-api-spec/build.gradle
+++ b/rest-api-spec/build.gradle
@@ -1,8 +1,7 @@
-apply plugin: 'java'
-apply plugin: 'com.bmuschko.nexus'
+apply plugin: 'elasticsearch.build'
+apply plugin: 'nebula.maven-base-publish'
+apply plugin: 'nebula.maven-scm'
-extraArchive {
- sources = false
- javadoc = false
- tests = false
-}
+test.enabled = false
+jarHell.enabled = false
+licenseHeaders.enabled = false
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json
index 27d951fa41..2ff171bf52 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json
@@ -39,7 +39,19 @@
},
"fields": {
"type": "list",
- "description" : "Default comma-separated list of fields to return in the response for updates"
+ "description" : "Default comma-separated list of fields to return in the response for updates, can be overridden on each sub-request"
+ },
+ "_source": {
+ "type" : "list",
+ "description" : "True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request"
+ },
+ "_source_exclude": {
+ "type" : "list",
+ "description" : "Default list of fields to exclude from the returned _source field, can be overridden on each sub-request"
+ },
+ "_source_include": {
+ "type" : "list",
+ "description" : "Default list of fields to extract and return from the _source field, can be overridden on each sub-request"
},
"pipeline" : {
"type" : "string",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
index 7858fc2ee9..03b67fd14c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
@@ -33,6 +33,12 @@
"type": "list",
"description" : "Comma-separated list of column names to display"
},
+ "health": {
+ "type" : "enum",
+ "options" : ["green","yellow","red"],
+ "default" : null,
+ "description" : "A health status (\"green\", \"yellow\", or \"red\" to filter only indices matching the specified health status"
+ },
"help": {
"type": "boolean",
"description": "Return help information",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json
new file mode 100644
index 0000000000..f8aaa72723
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json
@@ -0,0 +1,45 @@
+{
+ "cat.templates": {
+ "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/templates",
+ "paths": ["/_cat/templates", "/_cat/templates/{name}"],
+ "parts": {
+ "name": {
+ "type" : "string",
+ "description" : "A pattern that returned template names must match"
+ }
+ },
+ "params": {
+ "format": {
+ "type" : "string",
+ "description" : "a short version of the Accept header, e.g. json, yaml"
+ },
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
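As with the other _cat endpoints, the new spec supports a name filter plus the usual h, v, help, and format parameters. A hedged sketch using only parameters declared above:

    # Hypothetical calls against a local node.
    curl 'localhost:9200/_cat/templates?v'                      # all templates, with headers
    curl 'localhost:9200/_cat/templates/test*?h=name,template'  # filtered, selected columns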
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json
index c00f863577..f1a6a98217 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json
@@ -31,7 +31,7 @@
"description" : "Explicit operation timeout"
},
"wait_for_active_shards": {
- "type" : "number",
+ "type" : "string",
"description" : "Wait until the specified number of shards is active"
},
"wait_for_nodes": {
@@ -43,9 +43,9 @@
"options" : ["immediate", "urgent", "high", "normal", "low", "languid"],
"description" : "Wait until all currently queued events with the given priorty are processed"
},
- "wait_for_relocating_shards": {
- "type" : "number",
- "description" : "Wait until the specified number of relocating shards is finished"
+ "wait_for_no_relocating_shards": {
+ "type" : "boolean",
+ "description" : "Whether to wait until there are no relocating shards in the cluster"
},
"wait_for_status": {
"type" : "enum",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
index f8743e1d1b..a734f7b1ba 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
@@ -40,13 +40,13 @@
"type" : "boolean",
"description" : "Specify whether to return detailed information about score computation as part of a hit"
},
- "fields": {
+ "stored_fields": {
"type" : "list",
- "description" : "A comma-separated list of fields to return as part of a hit"
+ "description" : "A comma-separated list of stored fields to return as part of a hit"
},
- "fielddata_fields": {
+ "docvalue_fields": {
"type" : "list",
- "description" : "A comma-separated list of fields to return as the field data representation of a field for each hit"
+ "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit"
},
"from": {
"type" : "number",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
index 30b5deff1d..328794ffdd 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
@@ -41,9 +41,9 @@
"type" : "string",
"description" : "The default field for query string query (default: _all)"
},
- "fields": {
+ "stored_fields": {
"type": "list",
- "description" : "A comma-separated list of fields to return in the response"
+ "description" : "A comma-separated list of stored fields to return in the response"
},
"lenient": {
"type" : "boolean",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json
index e0e5170f9c..8aba39e771 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json
@@ -23,9 +23,9 @@
}
},
"params": {
- "fields": {
+ "stored_fields": {
"type": "list",
- "description" : "A comma-separated list of fields to return in the response"
+ "description" : "A comma-separated list of stored fields to return in the response"
},
"parent": {
"type" : "string",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
index b46e3fcf11..677219adde 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
@@ -39,7 +39,7 @@
"refresh": {
"type" : "enum",
"options": ["true", "false", "wait_for"],
- "description" : "If `true` then refresh the effected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes."
+ "description" : "If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes."
},
"routing": {
"type" : "string",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json
index 37d9d979fa..d793199bc2 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json
@@ -3,8 +3,8 @@
"documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-types-exists.html",
"methods": ["HEAD"],
"url": {
- "path": "/{index}/{type}",
- "paths": ["/{index}/{type}"],
+ "path": "/{index}/_mapping/{type}",
+ "paths": ["/{index}/_mapping/{type}"],
"parts": {
"index": {
"type" : "list",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json
index d47619c73a..77d9e03716 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json
@@ -18,7 +18,7 @@
},
"wait_if_ongoing": {
"type" : "boolean",
- "description" : "If set to true the flush operation will block until the flush can be executed if another flush operation is already executing. The default is false and will cause an exception to be thrown on the shard level if another flush operation is already running."
+ "description" : "If set to true the flush operation will block until the flush can be executed if another flush operation is already executing. The default is true. If set to false the flush will be skipped iff if another flush operation is already running."
},
"ignore_unavailable": {
"type" : "boolean",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json
index 0e5e4ffd24..6611145671 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json
@@ -29,7 +29,7 @@
"wait_for_completion": {
"type" : "boolean",
"description" : "Specify whether the request should block until the all segments are upgraded (default: false)"
- },
+ },
"only_ancient_segments": {
"type" : "boolean",
"description" : "If true, only ancient (an older Lucene major release) segments will be upgraded"
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json
index 1639f3619b..1f1f5adf75 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json
@@ -16,9 +16,9 @@
}
},
"params": {
- "fields": {
+ "stored_fields": {
"type": "list",
- "description" : "A comma-separated list of fields to return in the response"
+ "description" : "A comma-separated list of stored fields to return in the response"
},
"preference": {
"type" : "string",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json
index 39aa53b257..57e0e18966 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json
@@ -1,6 +1,6 @@
{
"msearch_template": {
- "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html",
+ "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html",
"methods": ["GET", "POST"],
"url": {
"path": "/_msearch/template",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json
index ddb7dcc4dc..5be7ea2740 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.rethrottle.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json
@@ -1,5 +1,5 @@
{
- "reindex.rethrottle": {
+ "reindex_rethrottle": {
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html",
"methods": ["POST"],
"url": {
@@ -15,7 +15,7 @@
"requests_per_second": {
"type": "float",
"required": true,
- "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts."
+ "description": "The throttle to set on this request in floating sub-requests per second. -1 means set no throttle."
}
}
},
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json
index 67cbcf44a9..ff1d35bb41 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json
@@ -46,6 +46,14 @@
"type" : "enum",
"options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch"],
"description" : "Search operation type"
+ },
+ "explain": {
+ "type" : "boolean",
+ "description" : "Specify whether to return detailed information about score computation as part of a hit"
+ },
+ "profile": {
+ "type" : "boolean",
+ "description" : "Specify whether to profile the query execution"
}
}
},
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json
index 65a8deace7..760809cdf9 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json
@@ -21,6 +21,10 @@
"master_timeout": {
"type" : "time",
"description" : "Explicit operation timeout for connection to master node"
+ },
+ "ignore_unavailable": {
+ "type": "boolean",
+ "description": "Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown"
}
}
},
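Both snapshot.get and snapshot.status (next hunk) gain ignore_unavailable, so a listing that names a missing snapshot can skip it instead of failing with SnapshotMissingException. A sketch (repository and snapshot names are placeholders):

    # Hypothetical: skip missing snapshots rather than throwing.
    curl 'localhost:9200/_snapshot/my_repo/snap_1,snap_missing?ignore_unavailable=true'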
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json
index ebc9180cb5..cba488de79 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json
@@ -19,9 +19,13 @@
"master_timeout": {
"type" : "time",
"description" : "Explicit operation timeout for connection to master node"
+ },
+ "ignore_unavailable": {
+ "type": "boolean",
+ "description": "Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown"
}
}
},
"body": null
}
-}
\ No newline at end of file
+}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
index a18d081f9b..d87e4c5e7f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
@@ -31,6 +31,18 @@
"type": "list",
"description": "A comma-separated list of fields to return in the response"
},
+ "_source": {
+ "type" : "list",
+ "description" : "True or false to return the _source field or not, or a list of fields to return"
+ },
+ "_source_exclude": {
+ "type" : "list",
+ "description" : "A list of fields to exclude from the returned _source field"
+ },
+ "_source_include": {
+ "type" : "list",
+ "description" : "A list of fields to extract and return from the _source field"
+ },
"lang": {
"type": "string",
"description": "The script language (default: groovy)"
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
index 72149adc66..b7f608b8b4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
@@ -40,13 +40,17 @@
"type" : "boolean",
"description" : "Specify whether to return detailed information about score computation as part of a hit"
},
- "fields": {
+ "stored_fields": {
"type" : "list",
- "description" : "A comma-separated list of fields to return as part of a hit"
+ "description" : "A comma-separated list of stored fields to return as part of a hit"
+ },
+ "docvalue_fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit"
},
"fielddata_fields": {
"type" : "list",
- "description" : "A comma-separated list of fields to return as the field data representation of a field for each hit"
+ "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit"
},
"from": {
"type" : "number",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc
index 16b607f4a2..612831575e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc
@@ -10,7 +10,7 @@ Elasticsearch as follows:
[source,sh]
---------------------
-bin/elasticsearch --script.inline true --node.testattr test --path.repo /tmp --repositories.url.allowed_urls 'http://snapshot.*'
+bin/elasticsearch -E script.inline true -E node.attr.testattr test -E path.repo /tmp -E repositories.url.allowed_urls 'http://snapshot.*'
---------------------
=======================================
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_fields.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_fields.yaml
deleted file mode 100644
index 3aa9d52263..0000000000
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_fields.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-"Fields":
- - do:
- index:
- refresh: true
- index: test_index
- type: test_type
- id: test_id_1
- body: { "foo": "bar" }
-
- - do:
- index:
- refresh: true
- index: test_index
- type: test_type
- id: test_id_2
- body: { "foo": "qux" }
-
- - do:
- index:
- refresh: true
- index: test_index
- type: test_type
- id: test_id_3
- body: { "foo": "corge" }
-
-
- - do:
- bulk:
- refresh: true
- body: |
- { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "fields": ["_source"] } }
- { "doc": { "foo": "baz" } }
- { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2" } }
- { "fields": ["_source"], "doc": { "foo": "quux" } }
-
- - match: { items.0.update.get._source.foo: baz }
- - match: { items.1.update.get._source.foo: quux }
-
- - do:
- bulk:
- index: test_index
- type: test_type
- fields: _source
- body: |
- { "update": { "_id": "test_id_3" } }
- { "doc": { "foo": "garply" } }
-
- - match: { items.0.update.get._source.foo: garply }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yaml
new file mode 100644
index 0000000000..c852c376cc
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yaml
@@ -0,0 +1,76 @@
+---
+"Source filtering":
+ - do:
+ index:
+ refresh: true
+ index: test_index
+ type: test_type
+ id: test_id_1
+ body: { "foo": "bar", "bar": "foo" }
+
+ - do:
+ index:
+ refresh: true
+ index: test_index
+ type: test_type
+ id: test_id_2
+ body: { "foo": "qux", "bar": "pux" }
+
+ - do:
+ index:
+ refresh: true
+ index: test_index
+ type: test_type
+ id: test_id_3
+ body: { "foo": "corge", "bar": "forge" }
+
+
+ - do:
+ bulk:
+ refresh: true
+ body: |
+ { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_source": true } }
+ { "doc": { "foo": "baz" } }
+ { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2" } }
+ { "_source": true, "doc": { "foo": "quux" } }
+
+ - match: { items.0.update.get._source.foo: baz }
+ - match: { items.1.update.get._source.foo: quux }
+
+ - do:
+ bulk:
+ index: test_index
+ type: test_type
+ _source: true
+ body: |
+ { "update": { "_id": "test_id_3" } }
+ { "doc": { "foo": "garply" } }
+
+ - match: { items.0.update.get._source.foo: garply }
+
+ - do:
+ bulk:
+ refresh: true
+ body: |
+ { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_source": {"includes": "bar"} } }
+ { "doc": { "foo": "baz" } }
+ { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2" } }
+ { "_source": {"includes": "foo"}, "doc": { "foo": "quux" } }
+
+ - match: { items.0.update.get._source.bar: foo }
+ - is_false: items.0.update.get._source.foo
+ - match: { items.1.update.get._source.foo: quux }
+ - is_false: items.1.update.get._source.bar
+
+ - do:
+ bulk:
+ index: test_index
+ type: test_type
+ _source_include: foo
+ body: |
+ { "update": { "_id": "test_id_3" } }
+ { "doc": { "foo": "garply" } }
+
+ - match: { items.0.update.get._source.foo: garply }
+ - is_false: items.0.update.get._source.bar
+
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml
index b07b5dadd9..24619e5335 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml
@@ -71,6 +71,47 @@
$/
---
+"Test cat indices using health status":
+
+ - do:
+ cluster.health: {}
+
+ - set: { number_of_data_nodes: count }
+
+ - do:
+ indices.create:
+ index: foo
+ body:
+ settings:
+ number_of_shards: "1"
+ number_of_replicas: "0"
+ - do:
+ indices.create:
+ index: bar
+ body:
+ settings:
+ number_of_shards: "1"
+ number_of_replicas: $count
+
+ - do:
+ cat.indices:
+ health: green
+ h: index
+
+ - match:
+ $body: |
+ /^(foo)$/
+
+ - do:
+ cat.indices:
+ health: yellow
+ h: index
+
+ - match:
+ $body: |
+ /^(bar)$/
+
+---
"Test cat indices using wildcards":
- do:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml
index a759efec9b..241ff71056 100755
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml
@@ -26,7 +26,7 @@
index1 \s+
\d \s+ # shard
(?:\d+ms|\d+(?:\.\d+)?s) \s+ # time in ms or seconds
- (store|replica|snapshot|relocating) \s+ # type
+ (empty_store|existing_store|peer|snapshot|local_shards) \s+ # source type
(init|index|verify_index|translog|finalize|done) \s+ # stage
[-\w./]+ \s+ # source_host
[-\w./]+ \s+ # target_host
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml
index 5e4a6b6369..a95314c57d 100755
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml
@@ -20,6 +20,7 @@
unassigned.at .+ \n
unassigned.for .+ \n
unassigned.details .+ \n
+ recoverysource.type .+ \n
completion.size .+ \n
fielddata.memory_size .+ \n
fielddata.evictions .+ \n
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml
new file mode 100644
index 0000000000..6758bec39d
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml
@@ -0,0 +1,176 @@
+---
+"Help":
+
+ - do:
+ cat.templates:
+ help: true
+
+ - match:
+ $body: |
+ /^ name .+ \n
+ template .+ \n
+ order .+ \n
+ version .+ \n
+ $/
+
+---
+"No templates":
+
+ - do:
+ cat.templates: {}
+
+ - match:
+ $body: |
+ /^
+ $/
+
+---
+"Normal templates":
+
+ - do:
+ indices.put_template:
+ name: test
+ body:
+ order: 0
+ version: 1
+ template: test-*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ indices.put_template:
+ name: test_2
+ body:
+ order: 1
+ version: 2
+ template: test-2*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cat.templates: {}
+
+ - match:
+ $body: /
+ (^|\n)test \s+
+ test-\* \s+
+ 0 \s+
+ 1
+ (\n|$)
+ /
+
+ - match:
+ $body: /
+ (^|\n)test_2 \s+
+ test-2\* \s+
+ 1 \s+
+ 2
+ (\n|$)
+ /
+
+---
+"Filtered templates":
+
+ - do:
+ indices.put_template:
+ name: test
+ body:
+ order: 0
+ version: 1
+ template: t*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ indices.put_template:
+ name: nomatch
+ body:
+ order: 2
+ version: 1
+ template: tea*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cat.templates:
+ name: test*
+
+ - match:
+ $body: |
+ /^
+ test \s+
+ t\* \s+
+ 0 \s+
+ 1
+ \n
+ $/
+
+---
+"Column headers":
+
+ - do:
+ indices.put_template:
+ name: test
+ body:
+ order: 0
+ version: 1
+ template: t*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cat.templates:
+ v: true
+ name: test
+
+ - match:
+ $body: |
+ /^
+ name \s+
+ template \s+
+ order \s+
+ version
+ \n
+ test \s+
+ t\* \s+
+ 0 \s+
+ 1
+ \n
+ $/
+
+---
+"Select columns":
+
+ - do:
+ indices.put_template:
+ name: test
+ body:
+ order: 0
+ version: 1
+ template: t*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cat.templates:
+ h: [name, template]
+ v: true
+ name: test*
+
+ - match:
+ $body: |
+ /^
+ name \s+
+ template
+ \n
+ test \s+
+ t\*
+ \n
+ $/
+
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yaml
index f803f3d589..c11a4e545d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yaml
@@ -27,6 +27,59 @@
- do:
cluster.health:
wait_for_status: green
+ wait_for_no_relocating_shards: true
+
+ - is_true: cluster_name
+ - is_false: timed_out
+ - gte: { number_of_nodes: 1 }
+ - gte: { number_of_data_nodes: 1 }
+ - gt: { active_primary_shards: 0 }
+ - gt: { active_shards: 0 }
+ - gte: { relocating_shards: 0 }
+ - match: { initializing_shards: 0 }
+ - match: { unassigned_shards: 0 }
+ - gte: { number_of_pending_tasks: 0 }
+
+---
+"cluster health basic test, one index with wait for active shards":
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_active_shards: 1
+ wait_for_no_relocating_shards: true
+
+ - is_true: cluster_name
+ - is_false: timed_out
+ - gte: { number_of_nodes: 1 }
+ - gte: { number_of_data_nodes: 1 }
+ - gt: { active_primary_shards: 0 }
+ - gt: { active_shards: 0 }
+ - gte: { relocating_shards: 0 }
+ - match: { initializing_shards: 0 }
+ - match: { unassigned_shards: 0 }
+ - gte: { number_of_pending_tasks: 0 }
+
+---
+"cluster health basic test, one index with wait for all active shards":
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_active_shards: all
+ wait_for_no_relocating_shards: true
- is_true: cluster_name
- is_false: timed_out
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/20_request_timeout.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/20_request_timeout.yaml
index 295eea3ede..66a7cb2b48 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/20_request_timeout.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/20_request_timeout.yaml
@@ -1,10 +1,29 @@
---
-"cluster health request timeout":
+"cluster health request timeout on waiting for nodes":
- do:
catch: request_timeout
cluster.health:
wait_for_nodes: 10
- timeout: 1s
+ timeout: 1ms
+
+ - is_true: cluster_name
+ - is_true: timed_out
+ - gte: { number_of_nodes: 1 }
+ - gte: { number_of_data_nodes: 1 }
+ - match: { active_primary_shards: 0 }
+ - match: { active_shards: 0 }
+ - match: { relocating_shards: 0 }
+ - match: { initializing_shards: 0 }
+ - match: { unassigned_shards: 0 }
+ - gte: { number_of_pending_tasks: 0 }
+
+---
+"cluster health request timeout waiting for active shards":
+ - do:
+ catch: request_timeout
+ cluster.health:
+ timeout: 1ms
+ wait_for_active_shards: 5
- is_true: cluster_name
- is_true: timed_out
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml
index 236ad876d0..29f048068b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml
@@ -19,7 +19,13 @@
- gte: { nodes.count.ingest: 0}
- gte: { nodes.count.coordinating_only: 0}
- is_true: nodes.os
+ - is_true: nodes.os.mem.total_in_bytes
+ - is_true: nodes.os.mem.free_in_bytes
+ - is_true: nodes.os.mem.used_in_bytes
+ - gte: { nodes.os.mem.free_percent: 0 }
+ - gte: { nodes.os.mem.used_percent: 0 }
- is_true: nodes.process
- is_true: nodes.jvm
- is_true: nodes.fs
- is_true: nodes.plugins
+ - is_true: nodes.network_types
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml
index 0342fdb019..1126a3d085 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml
@@ -1,25 +1,8 @@
---
"Create without ID":
- do:
+ catch: /Validation|Invalid/
create:
index: test_1
type: test
body: { foo: bar }
-
- - is_true: _id
- - match: { _index: test_1 }
- - match: { _type: test }
- - match: { _version: 1 }
- - set: { _id: id }
-
- - do:
- get:
- index: test_1
- type: test
- id: '$id'
-
- - match: { _index: test_1 }
- - match: { _type: test }
- - match: { _id: $id }
- - match: { _version: 1 }
- - match: { _source: { foo: bar }}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yaml
index 02f011d75b..44447ebee8 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yaml
@@ -28,7 +28,7 @@
type: test
id: 1
routing: 5
- fields: [_routing]
+ stored_fields: [_routing]
- match: { _id: "1"}
- match: { _routing: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yaml
index cac5387370..cb0dfcfe78 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yaml
@@ -31,7 +31,7 @@
type: test
id: 1
parent: 5
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _id: "1"}
- match: { _parent: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yaml
index 55b840526e..5f352ac90c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yaml
@@ -32,7 +32,7 @@
id: 1
parent: 5
routing: 4
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _id: "1"}
- match: { _parent: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/27_force_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/27_force_version.yaml
deleted file mode 100644
index c5d1ad31d7..0000000000
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/27_force_version.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-"Force version":
-
- - do:
- index:
- index: test_1
- type: test
- id: 1
- body: { foo: bar }
- version_type: force
- version: 5
-
- - match: { _version: 5}
-
- - do:
- delete:
- index: test_1
- type: test
- id: 1
- version_type: force
- version: 4
-
- - match: { _version: 4}
-
- - do:
- index:
- index: test_1
- type: test
- id: 1
- body: { foo: bar }
- version_type: force
- version: 6
-
- - match: { _version: 6}
-
- - do:
- delete:
- index: test_1
- type: test
- id: 1
- version_type: force
- version: 6
-
- - match: { _version: 6}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yaml
index 1025747a75..aa45376b52 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yaml
@@ -26,18 +26,18 @@
index: test_1
type: test
id: 1
- realtime: 1
-
- - is_true: ''
+ realtime: 0
+ - is_false: ''
+
- do:
exists:
index: test_1
type: test
id: 1
- realtime: 0
+ realtime: 1
- - is_false: ''
+ - is_true: ''
- do:
exists:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_fields.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yaml
index 15530b8be3..fbffb9e0ea 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_fields.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yaml
@@ -1,5 +1,19 @@
---
-"Fields":
+"Stored fields":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ properties:
+ foo:
+ type: keyword
+ store: true
+ count:
+ type: integer
+ store: true
- do:
index:
@@ -12,7 +26,7 @@
index: test_1
type: test
id: 1
- fields: foo
+ stored_fields: foo
- match: { _index: test_1 }
- match: { _type: test }
@@ -25,7 +39,7 @@
index: test_1
type: test
id: 1
- fields: [foo, count]
+ stored_fields: [foo, count]
- match: { fields.foo: [bar] }
- match: { fields.count: [1] }
@@ -36,7 +50,7 @@
index: test_1
type: test
id: 1
- fields: [foo, count, _source]
+ stored_fields: [foo, count, _source]
- match: { fields.foo: [bar] }
- match: { fields.count: [1] }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yaml
index 4a842cee2e..353dce8fab 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yaml
@@ -25,7 +25,7 @@ setup:
type: test
id: 1
parent: 中文
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _id: "1"}
- match: { _parent: 中文 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yaml
index 11fe04884b..94a40c0437 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yaml
@@ -28,7 +28,7 @@
type: test
id: 1
routing: 5
- fields: [_routing]
+ stored_fields: [_routing]
- match: { _id: "1"}
- match: { _routing: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yaml
index a7ed2df0dd..db71d16423 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yaml
@@ -32,7 +32,7 @@
id: 1
parent: 5
routing: 4
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _id: "1"}
- match: { _parent: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yaml
index 4631bed029..0c705de5e7 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yaml
@@ -22,21 +22,21 @@
body: { foo: bar }
- do:
+ catch: missing
get:
index: test_1
type: test
id: 1
- realtime: 1
-
- - is_true: found
+ realtime: 0
- do:
- catch: missing
get:
index: test_1
type: test
id: 1
- realtime: 0
+ realtime: 1
+
+ - is_true: found
- do:
get:
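The reordering makes the intended semantics easier to read: with realtime disabled a freshly indexed document stays invisible until a refresh, while a realtime get (the default) serves it straight from the translog. A sketch of the two calls (names are placeholders):

    # Hypothetical: document indexed but not yet refreshed.
    curl 'localhost:9200/test_1/test/1?realtime=0'   # missing: search-time view only
    curl 'localhost:9200/test_1/test/1?realtime=1'   # found: realtime get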
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yaml
index 03572bbbc3..c858886ca3 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yaml
@@ -1,5 +1,17 @@
---
"Source filtering":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ properties:
+ count:
+ type: integer
+ store: true
+
- do:
index:
index: test_1
@@ -46,7 +58,7 @@
index: test_1
type: test
id: 1
- fields: count
+ stored_fields: count
_source: true
- match: { _index: test_1 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml
index f66322bf20..e255ce510e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml
@@ -86,30 +86,3 @@
id: 1
version: 1
version_type: external_gte
-
- - do:
- get:
- index: test_1
- type: test
- id: 1
- version: 2
- version_type: force
- - match: { _id: "1" }
-
- - do:
- get:
- index: test_1
- type: test
- id: 1
- version: 10
- version_type: force
- - match: { _id: "1" }
-
- - do:
- get:
- index: test_1
- type: test
- id: 1
- version: 1
- version_type: force
- - match: { _id: "1" }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yaml
index 92f21caf8b..20009c87b2 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yaml
@@ -22,21 +22,21 @@
body: { foo: bar }
- do:
+ catch: missing
get_source:
index: test_1
type: test
id: 1
- realtime: 1
-
- - match: { '': {foo: bar}}
+ realtime: 0
- do:
- catch: missing
get_source:
index: test_1
type: test
id: 1
- realtime: 0
+ realtime: 1
+
+ - match: { '': {foo: bar}}
- do:
get_source:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/37_force_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/37_force_version.yaml
deleted file mode 100644
index 17d1edc303..0000000000
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/37_force_version.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-"Force version":
-
- - do:
- index:
- index: test_1
- type: test
- id: 1
- body: { foo: bar }
- version_type: force
- version: 5
-
- - match: { _version: 5}
-
- - do:
- index:
- index: test_1
- type: test
- id: 1
- body: { foo: bar }
- version_type: force
- version: 4
-
- - match: { _version: 4}
-
- - do:
- index:
- index: test_1
- type: test
- id: 1
- body: { foo: bar2 }
- version_type: force
- version: 5
-
- - match: { _version: 5}
-
- - do:
- index:
- index: test_1
- type: test
- id: 1
- body: { foo: bar3 }
- version_type: force
- version: 5
-
- - match: { _version: 5}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yaml
index bb248a458a..7b3c21df4e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yaml
@@ -28,7 +28,7 @@
type: test
id: 1
routing: 5
- fields: [_routing]
+ stored_fields: [_routing]
- match: { _id: "1"}
- match: { _routing: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yaml
index 916e1ac352..89077dac4a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yaml
@@ -31,7 +31,7 @@
type: test
id: 1
parent: 5
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _id: "1"}
- match: { _parent: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yaml
index a7ed2df0dd..db71d16423 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yaml
@@ -32,7 +32,7 @@
id: 1
parent: 5
routing: 4
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _id: "1"}
- match: { _parent: "5"}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml
index cabfbfeb8b..38ca46570f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml
@@ -95,3 +95,26 @@
- match: {test_index.aliases.test_clias.filter.term.field: value}
- is_false: test_index.aliases.test_clias.index_routing
- is_false: test_index.aliases.test_clias.search_routing
+
+---
+"Create index with no type mappings":
+ - do:
+ catch: /illegal_argument_exception/
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ "" : {}
+
+---
+"Create index with invalid mappings":
+ - do:
+ catch: /illegal_argument_exception/
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ "":
+ type: keyword
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml
index 102f4d46c9..1d33f2d31b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml
@@ -51,3 +51,19 @@
- match: {test_index.mappings.test_type.properties.text1.type: text}
- match: {test_index.mappings.test_type.properties.subfield.properties.text3.type: text}
- match: {test_index.mappings.test_type.properties.text1.fields.text_raw.type: keyword}
+
+---
+"Create index with invalid mappings":
+ - do:
+ indices.create:
+ index: test_index
+ - do:
+ catch: /illegal_argument_exception/
+ indices.put_mapping:
+ index: test_index
+ type: test_type
+ body:
+ test_type:
+ properties:
+ "":
+ type: keyword
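Taken together with the two new tests in indices.create/10_basic.yaml above, this pins down the new mapping validation: an empty type name ("" : {}) and an empty field name both fail with `illegal_argument_exception`, whether supplied at index creation or through put_mapping.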
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml
index e599ae43b9..3d70e930a0 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml
@@ -70,3 +70,99 @@
settings:
number_of_shards: 1
number_of_replicas: 0
+
+---
+"Test Put Versioned Template":
+ - do:
+ indices.put_template:
+ name: "my_template"
+ body: >
+ {
+ "version": 10,
+ "template": "*",
+ "settings": { "number_of_shards": 1 }
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ indices.get_template:
+ name: "my_template"
+ - match: { my_template.version: 10 }
+
+ # Lower version
+ - do:
+ indices.put_template:
+ name: "my_template"
+ body: >
+ {
+ "version": 9,
+ "template": "*",
+ "settings": { "number_of_shards": 1 }
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ indices.get_template:
+ name: "my_template"
+ - match: { my_template.version: 9 }
+
+ # Higher version
+ - do:
+ indices.put_template:
+ name: "my_template"
+ body: >
+ {
+ "version": 6789,
+ "template": "*",
+ "settings": { "number_of_shards": 1 }
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ indices.get_template:
+ name: "my_template"
+ - match: { my_template.version: 6789 }
+
+ # No version
+ - do:
+ indices.put_template:
+ name: "my_template"
+ body: >
+ {
+ "template": "*",
+ "settings": { "number_of_shards": 1 }
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ indices.get_template:
+ name: "my_template"
+ - is_false: my_template.version
+
+ # Coming back with a version
+ - do:
+ indices.put_template:
+ name: "my_template"
+ body: >
+ {
+ "version": 5385,
+ "template": "*",
+ "settings": { "number_of_shards": 1 }
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ indices.get_template:
+ name: "my_template"
+ - match: { my_template.version: 5385 }
+
+ # Able to delete the versioned template
+ - do:
+ indices.delete_template:
+ name: "my_template"
+ - match: { acknowledged: true }
+
+ - do:
+ catch: missing
+ indices.get_template:
+ name: "my_template"
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yaml
index 55ce29c754..fd8937a23c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yaml
@@ -18,7 +18,7 @@
index: [test_1]
human: true
- - match: { test_1.shards.0.type: "STORE" }
+ - match: { test_1.shards.0.type: "EMPTY_STORE" }
- match: { test_1.shards.0.stage: "DONE" }
- match: { test_1.shards.0.primary: true }
- match: { test_1.shards.0.start_time: /^2\d\d\d-.+/ }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml
index 4aeb66812c..9569728ce7 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml
@@ -47,7 +47,7 @@
cluster.health:
wait_for_status: green
index: source
- wait_for_relocating_shards: 0
+ wait_for_no_relocating_shards: true
wait_for_events: "languid"
# now we do the actual shrink
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/10_basic.yaml
index a924c1311e..b7cae037b8 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/10_basic.yaml
@@ -82,3 +82,87 @@
- match: {test_index.aliases.test_alias1: {'index_routing': 'routing_value', 'search_routing': 'routing_value'}}
- match: {test_index.aliases.test_alias2: {'index_routing': 'routing_value', 'search_routing': 'routing_value'}}
+
+---
+"Remove alias":
+ - do:
+ indices.create:
+ index: test_index
+ - do:
+ indices.exists_alias:
+ name: test_alias1
+ - is_false: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias2
+ - is_false: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias3
+ - is_false: ''
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ aliases: [test_alias1, test_alias2]
+ - do:
+ indices.exists_alias:
+ name: test_alias1
+ - is_true: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias2
+ - is_true: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias3
+ - is_false: ''
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - remove:
+ index: test_index
+ alias: test_alias1
+ - add:
+ index: test_index
+ alias: test_alias3
+ - do:
+ indices.exists_alias:
+ name: test_alias1
+ - is_false: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias2
+ - is_true: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias3
+ - is_true: ''
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - remove:
+ index: test_index
+ alias: test_alias2
+ - remove:
+ index: test_index
+ alias: test_alias3
+ - do:
+ indices.exists_alias:
+ name: test_alias1
+ - is_false: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias2
+ - is_false: ''
+ - do:
+ indices.exists_alias:
+ name: test_alias3
+ - is_false: ''
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml
new file mode 100644
index 0000000000..14e258a6bb
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml
@@ -0,0 +1,34 @@
+---
+"Remove and index and replace it with an alias":
+
+ - do:
+ indices.create:
+ index: test
+ - do:
+ indices.create:
+ index: test_2
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_2
+ aliases: [test, test_write]
+ - remove_index:
+ index: test
+
+ - do:
+ indices.exists_alias:
+ name: test
+ - is_true: ''
+
+ - do:
+ indices.exists_alias:
+ name: test_write
+ - is_true: ''
+
+ - do:
+ indices.get_mapping:
+ index: test
+ - is_true: test_2 # the name of the index that the alias points to, would be `test` if the index were still there
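`remove_index` is a new action for the update-aliases API: combined with `add` in the same request, it deletes an index and replaces it with an alias of the same name in one update-aliases call, so the name `test` never dangles between the delete and the alias creation. The final get_mapping assertion confirms the swap: resolving `test` now yields the concrete index `test_2`.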
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml
index d46ec7ee2a..d0c99ee0a7 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml
@@ -3,6 +3,7 @@
- do: {info: {}}
- is_true: name
- is_true: cluster_name
+ - is_true: cluster_uuid
- is_true: tagline
- is_true: version
- is_true: version.number
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_basic.yaml
index 6e822bf1da..e5b9b0fd5e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/10_basic.yaml
@@ -27,6 +27,96 @@
id: "my_pipeline"
---
+"Test Put Versioned Pipeline":
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "version": 10,
+ "processors": [ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ ingest.get_pipeline:
+ id: "my_pipeline"
+ - match: { my_pipeline.version: 10 }
+
+ # Lower version
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "version": 9,
+ "processors": [ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ ingest.get_pipeline:
+ id: "my_pipeline"
+ - match: { my_pipeline.version: 9 }
+
+ # Higher version
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "version": 6789,
+ "processors": [ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ ingest.get_pipeline:
+ id: "my_pipeline"
+ - match: { my_pipeline.version: 6789 }
+
+ # No version
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "processors": [ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ ingest.get_pipeline:
+ id: "my_pipeline"
+ - is_false: my_pipeline.version
+
+ # Coming back with a version
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "version": 5385,
+ "processors": [ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ ingest.get_pipeline:
+ id: "my_pipeline"
+ - match: { my_pipeline.version: 5385 }
+
+ # Able to delete the versioned pipeline
+ - do:
+ ingest.delete_pipeline:
+ id: "my_pipeline"
+ - match: { acknowledged: true }
+
+ - do:
+ catch: missing
+ ingest.get_pipeline:
+ id: "my_pipeline"
+---
"Test Get All Pipelines":
- do:
ingest.put_pipeline:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_fields.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yaml
index f56859ec3c..33cab111a8 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_fields.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yaml
@@ -1,5 +1,19 @@
---
-"Fields":
+"Stored fields":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ properties:
+ foo:
+ type: keyword
+ store: true
+ count:
+ type: integer
+ store: true
- do:
index:
@@ -15,9 +29,9 @@
body:
docs:
- { _id: 1 }
- - { _id: 1, fields: foo }
- - { _id: 1, fields: [foo] }
- - { _id: 1, fields: [foo, _source] }
+ - { _id: 1, stored_fields: foo }
+ - { _id: 1, stored_fields: [foo] }
+ - { _id: 1, stored_fields: [foo, _source] }
- is_false: docs.0.fields
- match: { docs.0._source: { foo: bar }}
@@ -35,13 +49,13 @@
mget:
index: test_1
type: test
- fields: foo
+ stored_fields: foo
body:
docs:
- { _id: 1 }
- - { _id: 1, fields: foo }
- - { _id: 1, fields: [foo] }
- - { _id: 1, fields: [foo, _source] }
+ - { _id: 1, stored_fields: foo }
+ - { _id: 1, stored_fields: [foo] }
+ - { _id: 1, stored_fields: [foo, _source] }
- match: { docs.0.fields.foo: [bar] }
- is_false: docs.0._source
@@ -59,13 +73,13 @@
mget:
index: test_1
type: test
- fields: [foo]
+ stored_fields: [foo]
body:
docs:
- { _id: 1 }
- - { _id: 1, fields: foo }
- - { _id: 1, fields: [foo] }
- - { _id: 1, fields: [foo, _source] }
+ - { _id: 1, stored_fields: foo }
+ - { _id: 1, stored_fields: [foo] }
+ - { _id: 1, stored_fields: [foo, _source] }
- match: { docs.0.fields.foo: [bar] }
- is_false: docs.0._source
@@ -83,13 +97,13 @@
mget:
index: test_1
type: test
- fields: [foo, _source]
+ stored_fields: [foo, _source]
body:
docs:
- { _id: 1 }
- - { _id: 1, fields: foo }
- - { _id: 1, fields: [foo] }
- - { _id: 1, fields: [foo, _source] }
+ - { _id: 1, stored_fields: foo }
+ - { _id: 1, stored_fields: [foo] }
+ - { _id: 1, stored_fields: [foo, _source] }
- match: { docs.0.fields.foo: [bar] }
- match: { docs.0._source: { foo: bar }}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yaml
index e1cf8df4fc..ad064df69c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yaml
@@ -34,9 +34,9 @@
body:
docs:
- { _id: 1 }
- - { _id: 1, parent: 5, fields: [ _parent, _routing ] }
- - { _id: 1, parent: 4, fields: [ _parent, _routing ] }
- - { _id: 2, parent: 5, fields: [ _parent, _routing ] }
+ - { _id: 1, parent: 5, stored_fields: [ _parent, _routing ] }
+ - { _id: 1, parent: 4, stored_fields: [ _parent, _routing ] }
+ - { _id: 2, parent: 5, stored_fields: [ _parent, _routing ] }
- is_false: docs.0.found
- is_false: docs.1.found
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yaml
index 71ac0feabb..7196412ebf 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yaml
@@ -26,7 +26,7 @@
mget:
index: test_1
type: test
- fields: [_routing]
+ stored_fields: [_routing]
body:
docs:
- { _id: 1 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yaml
index 19b597675c..1fa1ce2cdd 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yaml
@@ -29,7 +29,7 @@
mget:
index: test_1
type: test
- fields: [ _routing , _parent]
+ stored_fields: [ _routing , _parent]
body:
docs:
- { _id: 1 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yaml
index 4031f40525..432e5d8c20 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yaml
@@ -152,3 +152,49 @@
- is_false: nodes.$master.fs.data.0.path
- is_true: nodes.$master.fs.data.0.type
- is_true: nodes.$master.fs.data.0.total_in_bytes
+
+---
+"Nodes Stats filtered using both includes and excludes filters":
+ - do:
+ cluster.state: {}
+
+ # Get master node id
+ - set: { master_node: master }
+
+ # Nodes Stats with "nodes" field but no JVM stats
+ - do:
+ nodes.stats:
+ filter_path: [ "nodes", "-nodes.*.jvm", "-nodes.*.indices" ]
+
+ - is_false: cluster_name
+ - is_true: nodes
+ - is_true: nodes.$master.name
+ - is_true: nodes.$master.os
+ - is_false: nodes.$master.indices
+ - is_false: nodes.$master.jvm
+
+ # Nodes Stats with "nodes.*.indices" field and sub-fields but no indices segments
+ - do:
+ nodes.stats:
+ filter_path: "nodes.*.indices,-nodes.*.indices.segments"
+
+ - is_false: cluster_name
+ - is_true: nodes
+ - is_false: nodes.$master.name
+ - is_true: nodes.$master.indices
+ - is_true: nodes.$master.indices.docs
+ - is_false: nodes.$master.indices.segments
+
+ # Nodes Stats with "nodes.*.fs.data.t*" fields but no "type" field
+ - do:
+ nodes.stats:
+ filter_path: "nodes.*.fs.data.t*,-**.type"
+
+ - is_false: cluster_name
+ - is_true: nodes
+ - is_false: nodes.$master.name
+ - is_false: nodes.$master.indices
+ - is_false: nodes.$master.jvm
+ - is_true: nodes.$master.fs.data
+ - is_false: nodes.$master.fs.data.0.type
+ - is_true: nodes.$master.fs.data.0.total_in_bytes
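These new tests document how inclusive and exclusive `filter_path` patterns combine: exclusions (the `-` prefixed patterns) are applied on top of the inclusions, so a path matched by both is dropped. The parameter may be given either as a YAML list or as a single comma-separated string, and `**` matches across path segments. The search and count tests further below exercise the same semantics.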
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yaml
new file mode 100644
index 0000000000..f39b4dbd3f
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yaml
@@ -0,0 +1,44 @@
+setup:
+ - do:
+ indices.create:
+ index: test
+ - do:
+ index:
+ index: test
+ type: test
+ id: 1
+ body: { foo: bar }
+ - do:
+ indices.refresh:
+ index: [test]
+
+---
+"Stored fields":
+ - do:
+ search:
+ index: test
+
+ - is_true: hits.hits.0._id
+ - is_true: hits.hits.0._type
+ - is_true: hits.hits.0._source
+
+ - do:
+ search:
+ index: test
+ body:
+ stored_fields: []
+
+ - is_true: hits.hits.0._id
+ - is_true: hits.hits.0._type
+ - is_false: hits.hits.0._source
+
+ - do:
+ search:
+ index: test
+ body:
+ stored_fields: "_none_"
+
+ - is_false: hits.hits.0._id
+ - is_false: hits.hits.0._type
+ - is_false: hits.hits.0._source
+
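The new test distinguishes the two ways of suppressing stored fields in search: `stored_fields: []` disables `_source` in hits but keeps the `_id` and `_type` metadata, whereas the sentinel string `"_none_"` suppresses metadata fields as well, so hits carry no `_id`, `_type`, or `_source`.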
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml
index 48857522cb..c8f6871295 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml
@@ -1,11 +1,22 @@
---
setup:
- do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ properties:
+ bigint:
+ type: keyword
+
+
+ - do:
index:
index: test_1
type: test
id: 1
- body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
+ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1, "bigint": 72057594037927936 }
- do:
indices.refresh: {}
@@ -91,6 +102,17 @@ setup:
- is_false: hits.hits.0._source.include.field2
---
+"_source include on bigint":
+ - do:
+ search:
+ body:
+ _source:
+ includes: bigint
+ query: { match_all: {} }
+ - match: { hits.hits.0._source.bigint: 72057594037927936 }
+ - is_false: hits.hits.0._source.include.field2
+
+---
"fields in body":
- do:
search:
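The new `bigint` case guards numeric fidelity in source filtering: 72057594037927936 is 2^56, well past the 2^53 limit of exact integer representation in an IEEE double, so the assertion fails if the `_source` filter round-trips numbers through doubles instead of preserving the original token. Mapping the field as `keyword` keeps the index side out of the picture; only the stored source is under test.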
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml
index e7d51c5bce..bff9a16960 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml
@@ -84,3 +84,64 @@
- is_true: hits.hits.1._index
- is_false: hits.hits.1._type
- is_true: hits.hits.1._id
+
+---
+"Search results filtered using both includes and excludes filters":
+ - do:
+ bulk:
+ refresh: true
+ body: |
+ {"index": {"_index": "index-1", "_type": "type-1", "_id": "1"}}
+ {"name": "First document", "properties": {"size": 123, "meta": {"foo": "bar"}}}
+ {"index": {"_index": "index-1", "_type": "type-1", "_id": "2"}}
+ {"name": "Second document", "properties": {"size": 465, "meta": {"foo": "bar", "baz": "qux"}}}
+
+ - do:
+ search:
+ filter_path: [ "-**._source.properties", "**._source" ]
+ body: { query: { match_all: {} } }
+
+ - is_false: took
+ - is_true: hits.hits.0._source
+ - is_true: hits.hits.0._source.name
+ - is_false: hits.hits.0._source.properties
+ - is_true: hits.hits.1._source
+ - is_true: hits.hits.1._source.name
+ - is_false: hits.hits.1._source.properties
+
+ - do:
+ search:
+ filter_path: [ "**.properties" , "-hits.hits._source.properties.meta" ]
+ body: { query: { match_all: {} } }
+
+ - is_false: took
+ - is_true: hits.hits.0._source
+ - is_false: hits.hits.0._source.name
+ - is_true: hits.hits.0._source.properties
+ - is_true: hits.hits.0._source.properties.size
+ - is_false: hits.hits.0._source.properties.meta
+ - is_true: hits.hits.1._source
+ - is_false: hits.hits.1._source.name
+ - is_true: hits.hits.1._source.properties
+ - is_true: hits.hits.1._source.properties.size
+ - is_false: hits.hits.1._source.properties.meta
+
+ - do:
+ search:
+ filter_path: "**._source,-**.meta.foo"
+ body: { query: { match_all: {} } }
+
+ - is_false: took
+ - is_true: hits.hits.0._source
+ - is_true: hits.hits.0._source.name
+ - is_true: hits.hits.0._source.properties
+ - is_true: hits.hits.0._source.properties.size
+ - is_false: hits.hits.0._source.properties.meta.foo
+
+ - do:
+ count:
+ filter_path: "-*"
+ body: { query: { match_all: {} } }
+
+ - is_false: count
+ - is_false: _shards
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml
new file mode 100644
index 0000000000..24a7ac6adc
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml
@@ -0,0 +1,54 @@
+---
+setup:
+
+ - do:
+ snapshot.create_repository:
+ repository: test_repo_get_1
+ body:
+ type: fs
+ settings:
+ location: "test_repo_get_1_loc"
+
+---
+"Get snapshot info":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ snapshot.create:
+ repository: test_repo_get_1
+ snapshot: test_snapshot
+ wait_for_completion: true
+
+ - do:
+ snapshot.get:
+ repository: test_repo_get_1
+ snapshot: test_snapshot
+
+ - is_true: snapshots
+
+---
+"Get missing snapshot info throws an exception":
+
+ - do:
+ catch: /snapshot_missing_exception.+ is missing/
+ snapshot.get:
+ repository: test_repo_get_1
+ snapshot: test_nonexistent_snapshot
+
+---
+"Get missing snapshot info succeeds when ignoreUnavailable is true":
+
+ - do:
+ snapshot.get:
+ repository: test_repo_get_1
+ snapshot: test_nonexistent_snapshot
+ ignore_unavailable: true
+
+ - is_true: snapshots
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml
new file mode 100644
index 0000000000..838c126497
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml
@@ -0,0 +1,54 @@
+---
+setup:
+
+ - do:
+ snapshot.create_repository:
+ repository: test_repo_status_1
+ body:
+ type: fs
+ settings:
+ location: "test_repo_status_1_loc"
+
+---
+"Get snapshot status":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ snapshot.create:
+ repository: test_repo_status_1
+ snapshot: test_snapshot
+ wait_for_completion: true
+
+ - do:
+ snapshot.status:
+ repository: test_repo_status_1
+ snapshot: test_snapshot
+
+ - is_true: snapshots
+
+---
+"Get missing snapshot status throws an exception":
+
+ - do:
+ catch: /snapshot_missing_exception.+ is missing/
+ snapshot.status:
+ repository: test_repo_status_1
+ snapshot: test_nonexistent_snapshot
+
+---
+"Get missing snapshot status succeeds when ignoreUnavailable is true":
+
+ - do:
+ snapshot.status:
+ repository: test_repo_status_1
+ snapshot: test_nonexistent_snapshot
+ ignore_unavailable: true
+
+ - is_true: snapshots
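Both new test files follow the same contract: a missing snapshot makes snapshot.get and snapshot.status fail with `snapshot_missing_exception`, unless `ignore_unavailable: true` is passed, in which case the call succeeds and still returns a `snapshots` section.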
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/40_versions.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/40_versions.yaml
index f66322bf20..e255ce510e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/40_versions.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/40_versions.yaml
@@ -86,30 +86,3 @@
id: 1
version: 1
version_type: external_gte
-
- - do:
- get:
- index: test_1
- type: test
- id: 1
- version: 2
- version_type: force
- - match: { _id: "1" }
-
- - do:
- get:
- index: test_1
- type: test
- id: 1
- version: 10
- version_type: force
- - match: { _id: "1" }
-
- - do:
- get:
- index: test_1
- type: test
- id: 1
- version: 1
- version_type: force
- - match: { _id: "1" }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yaml
index e96c175489..097f49007e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yaml
@@ -30,7 +30,7 @@
type: test
id: 1
routing: 5
- fields: _routing
+ stored_fields: _routing
- match: { _routing: "5"}
@@ -49,9 +49,9 @@
type: test
id: 1
routing: 5
- fields: foo
+ _source: foo
body:
doc: { foo: baz }
- - match: { get.fields.foo: [baz] }
+ - match: { get._source.foo: baz }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yaml
index b25662dbf1..82508f951e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yaml
@@ -36,7 +36,7 @@ setup:
type: test
id: 1
parent: 5
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _parent: "5"}
- match: { _routing: "5"}
@@ -47,11 +47,11 @@ setup:
type: test
id: 1
parent: 5
- fields: foo
+ _source: foo
body:
doc: { foo: baz }
- - match: { get.fields.foo: [baz] }
+ - match: { get._source.foo: baz }
---
"Parent omitted":
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yaml
index 89dc83198c..e75eddff9a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yaml
@@ -34,7 +34,7 @@
id: 1
routing: 4
parent: 5
- fields: [_parent, _routing]
+ stored_fields: [_parent, _routing]
- match: { _parent: "5"}
- match: { _routing: "4"}
@@ -56,9 +56,9 @@
id: 1
parent: 5
routing: 4
- fields: foo
+ _source: foo
body:
doc: { foo: baz }
- - match: { get.fields.foo: [baz] }
+ - match: { get._source.foo: baz }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_fields.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yaml
index 86d6afa069..4bb22e6b80 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_fields.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yaml
@@ -1,19 +1,18 @@
---
-"Fields":
+"Source filtering":
- do:
update:
index: test_1
type: test
id: 1
- fields: foo,bar,_source
+ _source: [foo, bar]
body:
doc: { foo: baz }
upsert: { foo: bar }
- match: { get._source.foo: bar }
- - match: { get.fields.foo: [bar] }
- - is_false: get.fields.bar
+ - is_false: get._source.bar
# TODO:
#
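The update-API changes above replace the `fields` parameter with source filtering: `_source` on update takes a field name or list, and the returned document is read from `get._source` (e.g. `get._source.foo: baz`) instead of `get.fields.foo: [baz]`; stored metadata fields such as `_parent` and `_routing` move to `stored_fields`, matching the get/mget/search rename earlier in this diff.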
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yaml
index 5478d84e2a..7a6a58e12c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yaml
@@ -33,6 +33,6 @@
type: test
id: 1
parent: 5
- fields: [ _parent, _routing ]
+ stored_fields: [ _parent, _routing ]
diff --git a/settings.gradle b/settings.gradle
index 8aeb694b51..81513fd372 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -9,6 +9,7 @@ List projects = [
'client:sniffer',
'client:transport',
'client:test',
+ 'client:client-benchmark-noop-api-plugin',
'client:benchmark',
'benchmarks',
'distribution:integ-test-zip',
@@ -37,13 +38,13 @@ List projects = [
'plugins:analysis-stempel',
'plugins:discovery-azure-classic',
'plugins:discovery-ec2',
+ 'plugins:discovery-file',
'plugins:discovery-gce',
'plugins:ingest-geoip',
'plugins:ingest-attachment',
'plugins:ingest-user-agent',
'plugins:lang-javascript',
'plugins:lang-python',
- 'plugins:mapper-attachments',
'plugins:mapper-murmur3',
'plugins:mapper-size',
'plugins:repository-azure',
@@ -54,6 +55,7 @@ List projects = [
'plugins:store-smb',
'qa:backwards-5.0',
'qa:evil-tests',
+ 'qa:rolling-upgrade',
'qa:smoke-test-client',
'qa:smoke-test-ingest-with-all-dependencies',
'qa:smoke-test-ingest-disabled',
diff --git a/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java
index 35ef0868eb..c752563b80 100644
--- a/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java
@@ -53,6 +53,7 @@ import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory;
import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
+import org.elasticsearch.index.analysis.MinHashTokenFilterFactory;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
@@ -93,7 +94,7 @@ import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
-/**
+/**
* Alerts us if new analyzers are added to lucene, so we don't miss them.
* <p>
* If we don't want to expose one for a specific reason, just map it to Void.
@@ -115,11 +116,11 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("thai", ThaiTokenizerFactory.class)
.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class)
.put("whitespace", WhitespaceTokenizerFactory.class)
-
+
// this one "seems to mess up offsets". probably shouldn't be a tokenizer...
.put("wikipedia", Void.class)
.immutableMap();
-
+
static final Map<String,Class<?>> KNOWN_TOKENFILTERS = new MapBuilder<String,Class<?>>()
// exposed in ES
.put("apostrophe", ApostropheFilterFactory.class)
@@ -184,6 +185,7 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class)
.put("serbiannormalization", SerbianNormalizationFilterFactory.class)
.put("shingle", ShingleTokenFilterFactory.class)
+ .put("minhash", MinHashTokenFilterFactory.class)
.put("snowballporter", SnowballTokenFilterFactory.class)
.put("soraninormalization", SoraniNormalizationFilterFactory.class)
.put("soranistem", StemmerTokenFilterFactory.class)
@@ -199,7 +201,7 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("type", KeepTypesFilterFactory.class)
.put("uppercase", UpperCaseTokenFilterFactory.class)
.put("worddelimiter", WordDelimiterTokenFilterFactory.class)
-
+
// TODO: these tokenfilters are not yet exposed: useful?
// suggest stop
@@ -228,14 +230,15 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("fingerprint", Void.class)
// for tee-sinks
.put("daterecognizer", Void.class)
+
.immutableMap();
-
+
static final Map<String,Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String,Class<?>>()
// exposed in ES
.put("htmlstrip", HtmlStripCharFilterFactory.class)
.put("mapping", MappingCharFilterFactory.class)
.put("patternreplace", PatternReplaceCharFilterFactory.class)
-
+
// TODO: these charfilters are not yet exposed: useful?
// handling of zwnj for persian
.put("persian", Void.class)
diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
index fe624297e7..ed8725fa00 100644
--- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
+++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
@@ -20,8 +20,6 @@
package org.elasticsearch.bootstrap;
import com.carrotsearch.randomizedtesting.RandomizedRunner;
-import org.apache.log4j.Java9Hack;
-import org.apache.lucene.util.Constants;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.SecureSM;
import org.elasticsearch.common.Strings;
@@ -91,10 +89,6 @@ public class BootstrapForTesting {
throw new RuntimeException("found jar hell in test classpath", e);
}
- if (Constants.JRE_IS_MINIMUM_JAVA9) {
- Java9Hack.fixLog4j();
- }
-
// install security manager if requested
if (systemPropertyAsBoolean("tests.security.manager", true)) {
try {
diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java
index 2a04a5be97..b3bbd5a9a4 100644
--- a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java
@@ -32,7 +32,7 @@ import static org.hamcrest.CoreMatchers.equalTo;
abstract class ESElasticsearchCliTestCase extends ESTestCase {
interface InitConsumer {
- void accept(final boolean foreground, final Path pidFile, final Map<String, String> esSettings);
+ void accept(final boolean foreground, final Path pidFile, final boolean quiet, final Map<String, String> esSettings);
}
void runTest(
@@ -46,9 +46,9 @@ abstract class ESElasticsearchCliTestCase extends ESTestCase {
final AtomicBoolean init = new AtomicBoolean();
final int status = Elasticsearch.main(args, new Elasticsearch() {
@Override
- void init(final boolean daemonize, final Path pidFile, final Map<String, String> esSettings) {
+ void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map<String, String> esSettings) {
init.set(true);
- initConsumer.accept(!daemonize, pidFile, esSettings);
+ initConsumer.accept(!daemonize, pidFile, quiet, esSettings);
}
}, terminal);
assertThat(status, equalTo(expectedStatus));
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java
index 482d7c22c8..89fafc74c8 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java
@@ -16,40 +16,31 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.test;
+
+package org.elasticsearch.cluster;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterInfoService;
-import org.elasticsearch.cluster.ClusterModule;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.EmptyClusterInfoService;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
-import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
-import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.gateway.AsyncShardFetch;
import org.elasticsearch.gateway.GatewayAllocator;
-import org.elasticsearch.gateway.ReplicaShardAllocator;
-import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
-import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.gateway.TestGatewayAllocator;
-import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -62,12 +53,12 @@ import java.util.Set;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.is;
/**
*/
public abstract class ESAllocationTestCase extends ESTestCase {
+ private static final ClusterSettings EMPTY_CLUSTER_SETTINGS =
+ new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
public static MockAllocationService createAllocationService() {
return createAllocationService(Settings.Builder.EMPTY_SETTINGS);
@@ -78,51 +69,32 @@ public abstract class ESAllocationTestCase extends ESTestCase {
}
public static MockAllocationService createAllocationService(Settings settings, Random random) {
- return createAllocationService(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random);
+ return createAllocationService(settings, EMPTY_CLUSTER_SETTINGS, random);
}
public static MockAllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings, Random random) {
return new MockAllocationService(settings,
randomAllocationDeciders(settings, clusterSettings, random),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE);
+ new TestGatewayAllocator(), new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE);
}
public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) {
return new MockAllocationService(settings,
- randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random()),
- NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), clusterInfoService);
+ randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
+ new TestGatewayAllocator(), new BalancedShardsAllocator(settings), clusterInfoService);
}
public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator gatewayAllocator) {
return new MockAllocationService(settings,
- randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random()),
+ randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
gatewayAllocator, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE);
}
public static AllocationDeciders randomAllocationDeciders(Settings settings, ClusterSettings clusterSettings, Random random) {
- final List<Class<? extends AllocationDecider>> defaultAllocationDeciders = ClusterModule.DEFAULT_ALLOCATION_DECIDERS;
- final List<AllocationDecider> list = new ArrayList<>();
- for (Class<? extends AllocationDecider> deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
- try {
- try {
- Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, ClusterSettings.class);
- list.add(constructor.newInstance(settings, clusterSettings));
- } catch (NoSuchMethodException e) {
- Constructor<? extends AllocationDecider> constructor = null;
- constructor = deciderClass.getConstructor(Settings.class);
- list.add(constructor.newInstance(settings));
- }
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- assertThat(list.size(), equalTo(defaultAllocationDeciders.size()));
- for (AllocationDecider d : list) {
- assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true));
- }
- Randomness.shuffle(list);
- return new AllocationDeciders(settings, list.toArray(new AllocationDecider[list.size()]));
-
+ List<AllocationDecider> deciders = new ArrayList<>(
+ ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList()));
+ Collections.shuffle(deciders, random);
+ return new AllocationDeciders(settings, deciders);
}
protected static Set<DiscoveryNode.Role> MASTER_DATA_ROLES =
@@ -153,22 +125,34 @@ public abstract class ESAllocationTestCase extends ESTestCase {
if (initializingShards.isEmpty()) {
return clusterState;
}
- RoutingTable routingTable = strategy.applyStartedShards(clusterState, arrayAsArrayList(initializingShards.get(randomInt(initializingShards.size() - 1)))).routingTable();
- return ClusterState.builder(clusterState).routingTable(routingTable).build();
+ return strategy.applyStartedShards(clusterState,
+ arrayAsArrayList(initializingShards.get(randomInt(initializingShards.size() - 1))));
}
protected static AllocationDeciders yesAllocationDeciders() {
- return new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new TestAllocateDecision(Decision.YES),
- new SameShardAllocationDecider(Settings.EMPTY)});
+ return new AllocationDeciders(Settings.EMPTY, Arrays.asList(
+ new TestAllocateDecision(Decision.YES),
+ new SameShardAllocationDecider(Settings.EMPTY)));
}
protected static AllocationDeciders noAllocationDeciders() {
- return new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new TestAllocateDecision(Decision.NO)});
+ return new AllocationDeciders(Settings.EMPTY, Collections.singleton(new TestAllocateDecision(Decision.NO)));
}
protected static AllocationDeciders throttleAllocationDeciders() {
- return new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new TestAllocateDecision(Decision.THROTTLE),
- new SameShardAllocationDecider(Settings.EMPTY)});
+ return new AllocationDeciders(Settings.EMPTY, Arrays.asList(
+ new TestAllocateDecision(Decision.THROTTLE),
+ new SameShardAllocationDecider(Settings.EMPTY)));
+ }
+
+ protected ClusterState applyStartedShardsUntilNoChange(ClusterState clusterState, AllocationService service) {
+ ClusterState lastClusterState;
+ do {
+ lastClusterState = clusterState;
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ clusterState = service.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+ } while (lastClusterState.equals(clusterState) == false);
+ return clusterState;
}
public static class TestAllocateDecision extends AllocationDecider {
@@ -220,34 +204,32 @@ public abstract class ESAllocationTestCase extends ESTestCase {
* Mocks behavior in ReplicaShardAllocator to remove delayed shards from list of unassigned shards so they don't get reassigned yet.
*/
protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator {
- private final ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator(Settings.EMPTY) {
- @Override
- protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation) {
- return new AsyncShardFetch.FetchResult<>(shard.shardId(), null, Collections.<String>emptySet(), Collections.<String>emptySet());
- }
- };
-
public DelayedShardsMockGatewayAllocator() {
super(Settings.EMPTY, null, null);
}
@Override
- public void applyStartedShards(StartedRerouteAllocation allocation) {}
+ public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
+ // no-op
+ }
@Override
- public void applyFailedShards(FailedRerouteAllocation allocation) {}
+ public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
+ // no-op
+ }
@Override
public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
while (unassignedIterator.hasNext()) {
ShardRouting shard = unassignedIterator.next();
- IndexMetaData indexMetaData = allocation.metaData().index(shard.getIndexName());
- if (shard.primary() || shard.allocatedPostIndexCreate(indexMetaData) == false) {
+ if (shard.primary() || shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
continue;
}
- replicaShardAllocator.ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes());
+ if (shard.unassignedInfo().isDelayed()) {
+ unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes());
+ }
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java
index 707bfd9057..8eadc728a1 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java
@@ -19,6 +19,8 @@
package org.elasticsearch.cluster.routing;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
+
/**
* A helper class that allows access to package private APIs for testing.
*/
@@ -44,17 +46,17 @@ public class ShardRoutingHelper {
return routing.initialize(nodeId, null, expectedSize);
}
- public static ShardRouting reinit(ShardRouting routing) {
- return routing.reinitializeShard();
+ public static ShardRouting reinitPrimary(ShardRouting routing) {
+ return routing.reinitializePrimaryShard();
}
- public static ShardRouting reinit(ShardRouting routing, UnassignedInfo.Reason reason) {
- return routing.reinitializeShard().updateUnassignedInfo(new UnassignedInfo(reason, "test_reinit"));
+ public static ShardRouting reinitPrimary(ShardRouting routing, UnassignedInfo.Reason reason, RecoverySource recoverySource) {
+ return routing.reinitializePrimaryShard().updateUnassigned(new UnassignedInfo(reason, "test_reinit"), recoverySource);
}
- public static ShardRouting initWithSameId(ShardRouting copy) {
- return new ShardRouting(copy.shardId(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(),
- copy.primary(), ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null),
+ public static ShardRouting initWithSameId(ShardRouting copy, RecoverySource recoverySource) {
+ return new ShardRouting(copy.shardId(), copy.currentNodeId(), copy.relocatingNodeId(),
+ copy.primary(), ShardRoutingState.INITIALIZING, recoverySource, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null),
copy.allocationId(), copy.getExpectedShardSize());
}
@@ -62,8 +64,8 @@ public class ShardRoutingHelper {
return routing.moveToUnassigned(info);
}
- public static ShardRouting newWithRestoreSource(ShardRouting routing, RestoreSource restoreSource) {
- return new ShardRouting(routing.shardId(), routing.currentNodeId(), routing.relocatingNodeId(), restoreSource,
- routing.primary(), routing.state(), routing.unassignedInfo(), routing.allocationId(), routing.getExpectedShardSize());
+ public static ShardRouting newWithRestoreSource(ShardRouting routing, SnapshotRecoverySource recoverySource) {
+ return new ShardRouting(routing.shardId(), routing.currentNodeId(), routing.relocatingNodeId(), routing.primary(), routing.state(),
+ recoverySource, routing.unassignedInfo(), routing.allocationId(), routing.getExpectedShardSize());
}
}
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
index ca38a575ac..d0d6c02f63 100644
--- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
@@ -19,11 +19,16 @@
package org.elasticsearch.cluster.routing;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.index.Index;
+import org.elasticsearch.common.UUIDs;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.test.ESTestCase;
+import static org.elasticsearch.test.ESTestCase.randomAsciiOfLength;
+
/**
* A helper that allows to create shard routing instances within tests, while not requiring to expose
* different simplified constructors on the ShardRouting itself.
@@ -35,7 +40,11 @@ public class TestShardRouting {
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state) {
- return new ShardRouting(shardId, currentNodeId, null, null, primary, state, buildUnassignedInfo(state), buildAllocationId(state), -1);
+ return new ShardRouting(shardId, currentNodeId, null, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1);
+ }
+
+ public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state, RecoverySource recoverySource) {
+ return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource, buildUnassignedInfo(state), buildAllocationId(state), -1);
}
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state) {
@@ -43,7 +52,7 @@ public class TestShardRouting {
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state) {
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, null, primary, state, buildUnassignedInfo(state), buildAllocationId(state), -1);
+ return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1);
}
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId) {
@@ -51,33 +60,43 @@ public class TestShardRouting {
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId) {
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, null, primary, state, buildUnassignedInfo(state), allocationId, -1);
- }
-
- public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state) {
- return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId, restoreSource, primary, state);
- }
-
- public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state) {
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, buildUnassignedInfo(state), buildAllocationId(state), -1);
+ return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), allocationId, -1);
}
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId,
- String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
+ String relocatingNodeId, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
- return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId, restoreSource, primary, state, unassignedInfo);
+ return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId, primary, state, unassignedInfo);
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId,
- String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
+ String relocatingNodeId, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
- return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, unassignedInfo, buildAllocationId(state), -1);
+ return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state), unassignedInfo, buildAllocationId(state), -1);
}
public static ShardRouting relocate(ShardRouting shardRouting, String relocatingNodeId, long expectedShardSize) {
return shardRouting.relocate(relocatingNodeId, expectedShardSize);
}
+ private static RecoverySource buildRecoveryTarget(boolean primary, ShardRoutingState state) {
+ switch (state) {
+ case UNASSIGNED:
+ case INITIALIZING:
+ if (primary) {
+ return ESTestCase.randomFrom(RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE,
+ RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE);
+ } else {
+ return RecoverySource.PeerRecoverySource.INSTANCE;
+ }
+ case STARTED:
+ case RELOCATING:
+ return null;
+ default:
+ throw new IllegalStateException("illegal state");
+ }
+ }
+
private static AllocationId buildAllocationId(ShardRoutingState state) {
switch (state) {
case UNASSIGNED:
@@ -105,4 +124,15 @@ public class TestShardRouting {
throw new IllegalStateException("illegal state");
}
}
+
+ public static RecoverySource randomRecoverySource() {
+ return ESTestCase.randomFrom(RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE,
+ RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE,
+ RecoverySource.PeerRecoverySource.INSTANCE,
+ RecoverySource.LocalShardsRecoverySource.INSTANCE,
+ new RecoverySource.SnapshotRecoverySource(
+ new Snapshot("repo", new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID())),
+ Version.CURRENT,
+ "some_index"));
+ }
}
diff --git a/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java b/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
index e576d17706..5b00f9f2e8 100644
--- a/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
+++ b/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
@@ -30,7 +30,7 @@ public class PathUtilsForTesting {
/** Sets a new default filesystem for testing */
public static void setup() {
- installMock(LuceneTestCase.getBaseTempDirForTestClass().getFileSystem());
+ installMock(LuceneTestCase.createTempDir().getFileSystem());
}
/** Installs a mock filesystem for testing */
diff --git a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
index a897de7073..854260f02a 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
@@ -21,11 +21,9 @@ package org.elasticsearch.index;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.analysis.AnalysisRegistry;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
@@ -36,7 +34,7 @@ import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
-import static org.elasticsearch.test.ESTestCase.createAnalysisService;
+import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
public class MapperTestUtils {
@@ -56,10 +54,10 @@ public class MapperTestUtils {
Settings finalSettings = settingsBuilder.build();
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", finalSettings);
- AnalysisService analysisService = createAnalysisService(indexSettings, finalSettings);
+ IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers;
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
return new MapperService(indexSettings,
- analysisService,
+ indexAnalyzers,
similarityService,
mapperRegistry,
() -> null);
diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
new file mode 100644
index 0000000000..23aed676af
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.shard;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexNotFoundException;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingHelper;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.MapperTestUtils;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.cache.query.DisabledQueryCache;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.UidFieldMapper;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
+import org.elasticsearch.indices.recovery.RecoveryFailedException;
+import org.elasticsearch.indices.recovery.RecoverySourceHandler;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.indices.recovery.StartRecoveryRequest;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * A base class for unit tests that need to create and shut down {@link IndexShard} instances easily,
+ * containing utilities for shard creation and recoveries. See {@link #newShard(boolean)} and
+ * {@link #newStartedShard()} for good starting points.
+ */
+public abstract class IndexShardTestCase extends ESTestCase {
+
+ protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() {
+ @Override
+ public void onRecoveryDone(RecoveryState state) {
+
+ }
+
+ @Override
+ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
+ throw new AssertionError(e);
+ }
+ };
+
+ protected ThreadPool threadPool;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ threadPool = new TestThreadPool(getClass().getName());
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ try {
+ ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
+ } finally {
+ super.tearDown();
+ }
+ }
+
+ private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
+ final ShardId shardId = shardPath.getShardId();
+ final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
+ @Override
+ public Directory newDirectory() throws IOException {
+ return newFSDirectory(shardPath.resolveIndex());
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+ };
+ return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
+ }
+
+ /**
+ * creates a new initializing shard. The shard will have its own unique data path.
+ *
+ * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
+ * (ready to recover from another shard)
+ */
+ protected IndexShard newShard(boolean primary) throws IOException {
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), "n1", primary,
+ ShardRoutingState.INITIALIZING,
+ primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
+ return newShard(shardRouting);
+ }
+
+ /**
+ * creates a new initializing shard. The shard will have its own unique data path.
+ *
+ * @param shardRouting the {@link ShardRouting} to use for this shard
+ * @param listeners an optional set of listeners to add to the shard
+ */
+ protected IndexShard newShard(ShardRouting shardRouting, IndexingOperationListener... listeners) throws IOException {
+ assert shardRouting.initializing() : shardRouting;
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .build();
+ IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName())
+ .settings(settings)
+ .primaryTerm(0, 1);
+ return newShard(shardRouting, metaData.build(), listeners);
+ }
+
+ /**
+ * creates a new initializing shard. The shard will have its own unique data path.
+ *
+ * @param shardId the shard id to use
+ * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
+ * (ready to recover from another shard)
+ * @param listeners an optional set of listeners to add to the shard
+ */
+ protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperationListener... listeners) throws IOException {
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAsciiOfLength(5), primary,
+ ShardRoutingState.INITIALIZING,
+ primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
+ return newShard(shardRouting, listeners);
+ }
+
+ /**
+ * creates a new initializing shard. The shard will be put in its proper path under the
+ * supplied node id.
+ *
+ * @param shardId the shard id to use
+ * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
+ * (ready to recover from another shard)
+ */
+ protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetaData indexMetaData,
+ @Nullable IndexSearcherWrapper searcherWrapper) throws IOException {
+ ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING,
+ primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
+ return newShard(shardRouting, indexMetaData, searcherWrapper);
+ }
+
+ /**
+ * creates a new initializing shard. The shard will be put in its proper path under the
+ * current node id the shard is assigned to.
+ *
+ * @param routing shard routing to use
+ * @param indexMetaData indexMetaData for the shard, including any mapping
+ * @param listeners an optional set of listeners to add to the shard
+ */
+ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, IndexingOperationListener... listeners)
+ throws IOException {
+ return newShard(routing, indexMetaData, null, listeners);
+ }
+
+ /**
+ * creates a new initializing shard. The shard will be put in its proper path under the
+ * current node id the shard is assigned to.
+ *
+ * @param routing shard routing to use
+ * @param indexMetaData indexMetaData for the shard, including any mapping
+ * @param indexSearcherWrapper an optional wrapper to be used during searches
+ * @param listeners an optional set of listeners to add to the shard
+ */
+ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData,
+ @Nullable IndexSearcherWrapper indexSearcherWrapper, IndexingOperationListener... listeners)
+ throws IOException {
+ // add node id as name to settings for proper logging
+ final ShardId shardId = routing.shardId();
+ final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
+ ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
+ return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, listeners);
+ }
+
+ /**
+ * creates a new initializing shard.
+ *
+ * @param routing shard routing to use
+ * @param shardPath path to use for shard data
+ * @param indexMetaData indexMetaData for the shard, including any mapping
+ * @param indexSearcherWrapper an optional wrapper to be used during searches
+ * @param listeners an optional set of listeners to add to the shard
+ */
+ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData,
+ @Nullable IndexSearcherWrapper indexSearcherWrapper,
+ IndexingOperationListener... listeners) throws IOException {
+ final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
+ final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
+ final IndexShard indexShard;
+ final Store store = createStore(indexSettings, shardPath);
+ boolean success = false;
+ try {
+ IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
+ MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), indexSettings.getSettings());
+ for (ObjectObjectCursor<String, MappingMetaData> typeMapping : indexMetaData.getMappings()) {
+ mapperService.merge(typeMapping.key, typeMapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
+ }
+ SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
+ final IndexEventListener indexEventListener = new IndexEventListener() {
+ };
+ final Engine.Warmer warmer = searcher -> {
+ };
+ IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
+ });
+ IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, indicesFieldDataCache,
+ new NoneCircuitBreakerService(), mapperService);
+ indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService, similarityService,
+ indexFieldDataService, null, indexEventListener, indexSearcherWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer,
+ Collections.emptyList(), Arrays.asList(listeners));
+ success = true;
+ } finally {
+ if (success == false) {
+ IOUtils.close(store);
+ }
+ }
+ return indexShard;
+ }
+
+ /**
+ * Takes an existing shard, closes it, and starts a new initializing shard at the same location
+ *
+ * @param listeners new listeners to use for the newly created shard
+ */
+ protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException {
+ final ShardRouting shardRouting = current.routingEntry();
+ return reinitShard(current, ShardRoutingHelper.initWithSameId(shardRouting,
+ shardRouting.primary() ? RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
+ ), listeners);
+ }
+
+ /**
+ * Takes an existing shard, closes it, and starts a new initializing shard at the same location
+ *
+ * @param routing the shard routing to use for the newly created shard.
+ * @param listeners new listeners to use for the newly created shard
+ */
+ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException {
+ closeShards(current);
+ return newShard(routing, current.shardPath(), current.indexSettings().getIndexMetaData(), null, listeners);
+ }
+
+ /**
+ * creates a new empty shard and starts it. The shard will randomly be either a replica or a primary.
+ */
+ protected IndexShard newStartedShard() throws IOException {
+ return newStartedShard(randomBoolean());
+ }
+
+ /**
+ * creates a new empty shard and starts it.
+ *
+ * @param primary controls whether the shard will be a primary or a replica.
+ */
+ protected IndexShard newStartedShard(boolean primary) throws IOException {
+ IndexShard shard = newShard(primary);
+ if (primary) {
+ recoveryShardFromStore(shard);
+ } else {
+ recoveryEmptyReplica(shard);
+ }
+ return shard;
+ }
+
+ protected void closeShards(IndexShard... shards) throws IOException {
+ closeShards(Arrays.asList(shards));
+ }
+
+ protected void closeShards(Iterable<IndexShard> shards) throws IOException {
+ for (IndexShard shard : shards) {
+ if (shard != null) {
+ try {
+ shard.close("test", false);
+ } finally {
+ IOUtils.close(shard.store());
+ }
+ }
+ }
+ }
+
+ protected void recoveryShardFromStore(IndexShard primary) throws IOException {
+ primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(),
+ getFakeDiscoNode(primary.routingEntry().currentNodeId()),
+ null));
+ primary.recoverFromStore();
+ primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry()));
+ }
+
+ protected void recoveryEmptyReplica(IndexShard replica) throws IOException {
+ IndexShard primary = null;
+ try {
+ primary = newStartedShard(true);
+ recoverReplica(replica, primary);
+ } finally {
+ closeShards(primary);
+ }
+ }
+
+ private DiscoveryNode getFakeDiscoNode(String id) {
+ return new DiscoveryNode(id, new LocalTransportAddress("_fake_" + id), Version.CURRENT);
+ }
+
+ /** recovers a replica from the given primary */
+ protected void recoverReplica(IndexShard replica, IndexShard primary) throws IOException {
+ recoverReplica(replica, primary,
+ (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, version -> {
+ }),
+ true);
+ }
+
+ /**
+ * Recovers a replica from the given primary, allowing the caller to supply a custom recovery target.
+ * A typical usage of a custom recovery target is to assert things in the various stages of recovery
+ *
+ * @param markAsRecovering set to false if you have already marked the replica as recovering
+ */
+ protected void recoverReplica(IndexShard replica, IndexShard primary,
+ BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
+ boolean markAsRecovering)
+ throws IOException {
+ final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId());
+ final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId());
+ if (markAsRecovering) {
+ replica.markAsRecovering("remote",
+ new RecoveryState(replica.routingEntry(), pNode, rNode));
+ } else {
+ assertEquals(IndexShardState.RECOVERING, replica.state());
+ }
+ replica.prepareForIndexRecovery();
+ RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
+ StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,
+ getMetadataSnapshotOrEmpty(replica), false, 0);
+ RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {
+ },
+ (int) ByteSizeUnit.MB.toKB(1), logger);
+ recovery.recoverToTarget();
+ recoveryTarget.markAsDone();
+ replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
+ }
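Editor's note: a hedged sketch of the targetSupplier hook in action, assuming RecoveryTarget can be anonymously subclassed as it is constructed in recoverReplica above; which callbacks to override depends on what the test wants to assert.

    recoverReplica(replica, primary,
            (shard, sourceNode) -> new RecoveryTarget(shard, sourceNode, recoveryListener, version -> {}) {
                // override individual recovery callbacks here to assert on intermediate state
            },
            true);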
+
+ private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {
+ Store.MetadataSnapshot result;
+ try {
+ result = replica.snapshotStoreMetadata();
+ } catch (IndexNotFoundException e) {
+ // OK!
+ result = Store.MetadataSnapshot.EMPTY;
+ } catch (IOException e) {
+ logger.warn("failed read store, treating as empty", e);
+ result = Store.MetadataSnapshot.EMPTY;
+ }
+ return result;
+ }
+
+ protected Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
+ shard.refresh("get_uids");
+ try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
+ Set<Uid> ids = new HashSet<>();
+ for (LeafReaderContext leafContext : searcher.reader().leaves()) {
+ LeafReader reader = leafContext.reader();
+ Bits liveDocs = reader.getLiveDocs();
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ if (liveDocs == null || liveDocs.get(i)) {
+ Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME));
+ ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME)));
+ }
+ }
+ }
+ return ids;
+ }
+ }
+
+ protected void assertDocCount(IndexShard shard, int docCount) throws IOException {
+ assertThat(getShardDocUIDs(shard), hasSize(docCount));
+ }
+
+ protected void assertDocs(IndexShard shard, Uid... uids) throws IOException {
+ final Set<Uid> shardDocUIDs = getShardDocUIDs(shard);
+ assertThat(shardDocUIDs, contains(uids));
+ assertThat(shardDocUIDs, hasSize(uids.length));
+ }
+
+ protected Engine.Index indexDoc(IndexShard shard, String type, String id) {
+ return indexDoc(shard, type, id, "{}");
+ }
+
+ protected Engine.Index indexDoc(IndexShard shard, String type, String id, String source) {
+ final Engine.Index index;
+ if (shard.routingEntry().primary()) {
+ index = shard.prepareIndexOnPrimary(
+ SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source)),
+ Versions.MATCH_ANY, VersionType.INTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
+ } else {
+ index = shard.prepareIndexOnReplica(
+ SourceToParse.source(SourceToParse.Origin.REPLICA, shard.shardId().getIndexName(), type, id, new BytesArray(source)),
+ 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
+ }
+ shard.index(index);
+ return index;
+ }
+
+ protected Engine.Delete deleteDoc(IndexShard shard, String type, String id) {
+ final Engine.Delete delete;
+ if (shard.routingEntry().primary()) {
+ delete = shard.prepareDeleteOnPrimary(type, id, Versions.MATCH_ANY, VersionType.INTERNAL);
+ } else {
+ delete = shard.prepareDeleteOnReplica(type, id, 1, VersionType.EXTERNAL);
+ }
+ shard.delete(delete);
+ return delete;
+ }
+
+ protected void flushShard(IndexShard shard) {
+ flushShard(shard, false);
+ }
+
+ protected void flushShard(IndexShard shard, boolean force) {
+ shard.flush(new FlushRequest(shard.shardId().getIndexName()).force(force));
+ }
+}
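Editor's note: a minimal, hypothetical subclass (class and test names invented here) showing the intended workflow of this new base class: create a started shard, index into it, assert, and always close it.

    import java.io.IOException;

    import org.elasticsearch.index.shard.IndexShard;
    import org.elasticsearch.index.shard.IndexShardTestCase;

    public class MyShardTests extends IndexShardTestCase {
        public void testIndexThenCount() throws IOException {
            IndexShard shard = newStartedShard(true); // a started primary, recovered from an empty store
            indexDoc(shard, "type", "1");
            indexDoc(shard, "type", "2");
            assertDocCount(shard, 2);
            closeShards(shard); // releases the shard and its underlying store
        }
    }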
diff --git a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java
index 638d24e7f9..69dfae2c67 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java
@@ -21,12 +21,13 @@ package org.elasticsearch.index.store;
import com.carrotsearch.randomizedtesting.annotations.Listeners;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.store.BaseDirectoryTestCase;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TimeUnits;
import org.elasticsearch.bootstrap.BootstrapForTesting;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
/**
@@ -40,9 +41,14 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
public abstract class EsBaseDirectoryTestCase extends BaseDirectoryTestCase {
static {
+ try {
+ Class.forName("org.elasticsearch.test.ESTestCase");
+ } catch (ClassNotFoundException e) {
+ throw new AssertionError(e);
+ }
BootstrapForTesting.ensureInitialized();
}
- protected final ESLogger logger = Loggers.getLogger(getClass());
+ protected final Logger logger = Loggers.getLogger(getClass());
}
diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/IngestDocumentMatcher.java b/test/framework/src/main/java/org/elasticsearch/ingest/IngestDocumentMatcher.java
new file mode 100644
index 0000000000..bb058c5cfd
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/ingest/IngestDocumentMatcher.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest;
+
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertThat;
+
+public class IngestDocumentMatcher {
+ /**
+ * Helper method to assert the equivalence between two IngestDocuments.
+ *
+ * @param a first object to compare
+ * @param b second object to compare
+ */
+ public static void assertIngestDocument(Object a, Object b) {
+ if (a instanceof Map) {
+ Map<?, ?> mapA = (Map<?, ?>) a;
+ Map<?, ?> mapB = (Map<?, ?>) b;
+ for (Map.Entry<?, ?> entry : mapA.entrySet()) {
+ if (entry.getValue() instanceof List || entry.getValue() instanceof Map) {
+ assertIngestDocument(entry.getValue(), mapB.get(entry.getKey()));
+ }
+ }
+ } else if (a instanceof List) {
+ List<?> listA = (List<?>) a;
+ List<?> listB = (List<?>) b;
+ for (int i = 0; i < listA.size(); i++) {
+ Object value = listA.get(i);
+ if (value instanceof List || value instanceof Map) {
+ assertIngestDocument(value, listB.get(i));
+ }
+ }
+ } else if (a instanceof byte[]) {
+ assertArrayEquals((byte[]) a, (byte[])b);
+ } else if (a instanceof IngestDocument) {
+ IngestDocument docA = (IngestDocument) a;
+ IngestDocument docB = (IngestDocument) b;
+ assertIngestDocument(docA.getSourceAndMetadata(), docB.getSourceAndMetadata());
+ assertIngestDocument(docA.getIngestMetadata(), docB.getIngestMetadata());
+ } else {
+ String msg = String.format(Locale.ROOT, "Expected %s class to be equal to %s", a.getClass().getName(), b.getClass().getName());
+ assertThat(msg, a, equalTo(b));
+ }
+ }
+}
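Editor's note: a hedged usage sketch (variable names invented, java.util imports omitted). Note the matcher's recursion rule visible above: for Map inputs it only descends into values that are themselves Maps or Lists.

    Map<String, Object> docA = new HashMap<>();
    docA.put("tags", Arrays.asList("a", "b"));
    docA.put("meta", Collections.singletonMap("k", "v"));
    Map<String, Object> docB = new HashMap<>(docA);

    // passes: nested lists and maps are compared recursively
    IngestDocumentMatcher.assertIngestDocument(docA, docB);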
diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
index 37da93b825..b838cca4a2 100644
--- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
+++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
@@ -19,18 +19,27 @@
package org.elasticsearch.node;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.MockBigArrays;
+import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.MockSearchService;
import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.fetch.FetchPhase;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportInterceptor;
+import org.elasticsearch.transport.TransportService;
import java.util.Collection;
-import java.util.List;
/**
* A node for testing which allows:
@@ -62,11 +71,29 @@ public class MockNode extends Node {
return new MockBigArrays(settings, circuitBreakerService);
}
+
@Override
- protected Class<? extends SearchService> pickSearchServiceImplementation() {
+ protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
+ ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays,
+ FetchPhase fetchPhase) {
if (getPluginsService().filterPlugins(MockSearchService.TestPlugin.class).isEmpty()) {
- return super.pickSearchServiceImplementation();
+ return super.newSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
+ }
+ return new MockSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
+ }
+
+ @Override
+ protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool,
+ TransportInterceptor interceptor) {
+ // we use the MockTransportService.TestPlugin class as a marker to create a network
+ // module with this MockTransportService. NetworkService is such an integral part of the system
+ // that we don't allow plugging it in from plugins or anything; this is a test-only override
+ // and can't be done in a production env.
+ if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).size() == 1) {
+ return new MockTransportService(settings, transport, threadPool, interceptor);
+ } else {
+ return super.newTransportService(settings, transport, threadPool, interceptor);
}
- return MockSearchService.class;
}
}
+
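Editor's note: a hedged sketch of opting into the mock transport via the marker plugin described in the comment above; the MockNode constructor shape and the settings are assumptions, not taken from this diff.

    Settings settings = Settings.builder()
            .put("node.name", "mock-node")    // illustrative
            .put("path.home", "/tmp/mock-node")
            .build();
    Collection<Class<? extends Plugin>> plugins =
            Collections.singletonList(MockTransportService.TestPlugin.class);
    Node node = new MockNode(settings, plugins); // newTransportService(...) now yields a MockTransportService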
diff --git a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java
index db8b6825ec..cf565499a8 100644
--- a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java
+++ b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.node;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -30,6 +32,10 @@ import java.nio.file.Path;
import java.util.Collections;
import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
public class NodeTests extends ESTestCase {
@@ -55,4 +61,29 @@ public class NodeTests extends ESTestCase {
}
}
}
+
+ public void testWarnIfPreRelease() {
+ final Logger logger = mock(Logger.class);
+
+ final int id = randomIntBetween(1, 9) * 1000000;
+ final Version releaseVersion = Version.fromId(id + 99);
+ final Version preReleaseVersion = Version.fromId(id + randomIntBetween(0, 98));
+
+ Node.warnIfPreRelease(releaseVersion, false, logger);
+ verifyNoMoreInteractions(logger);
+
+ reset(logger);
+ Node.warnIfPreRelease(releaseVersion, true, logger);
+ verify(logger).warn(
+ "version [{}] is a pre-release version of Elasticsearch and is not suitable for production", releaseVersion + "-SNAPSHOT");
+
+ reset(logger);
+ final boolean isSnapshot = randomBoolean();
+ Node.warnIfPreRelease(preReleaseVersion, isSnapshot, logger);
+ verify(logger).warn(
+ "version [{}] is a pre-release version of Elasticsearch and is not suitable for production",
+ preReleaseVersion + (isSnapshot ? "-SNAPSHOT" : ""));
+ }
+
}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
index cae5b2ff95..bf300889cd 100644
--- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
+++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
@@ -20,9 +20,6 @@
package org.elasticsearch.search;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.MockNode;
@@ -69,11 +66,10 @@ public class MockSearchService extends SearchService {
ACTIVE_SEARCH_CONTEXTS.remove(context);
}
- @Inject
- public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService,
+ public MockSearchService(ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService,
BigArrays bigArrays, FetchPhase fetchPhase) {
- super(settings, clusterSettings, clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
+ super(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
}
@Override
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
index eb852ec611..6225f5fa5d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
@@ -39,17 +39,13 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.inject.Injector;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.ModulesBuilder;
-import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -62,15 +58,18 @@ import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentGenerator;
+import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
@@ -82,7 +81,6 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.analysis.AnalysisModule;
-import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
@@ -97,7 +95,6 @@ import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.internal.SearchContext;
-import org.elasticsearch.threadpool.ThreadPool;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.junit.After;
@@ -113,6 +110,9 @@ import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.Deque;
+import java.util.HashSet;
+import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@@ -137,7 +137,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
protected static final String DATE_FIELD_NAME = "mapped_date";
protected static final String OBJECT_FIELD_NAME = "mapped_object";
protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point";
- protected static final String GEO_POINT_FIELD_MAPPING = "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true";
+ protected static final String LEGACY_GEO_POINT_FIELD_MAPPING = "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true";
protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape";
protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME,
BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME};
@@ -153,6 +153,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
private static String[] currentTypes;
private static String[] randomTypes;
+
protected static Index getIndex() {
return index;
}
@@ -314,22 +315,41 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
/**
* Test that adding an additional object within each object of the otherwise correct query always triggers some kind of
* parse exception. Some specific objects do not cause any exception as they can hold arbitrary content; they can be
- * declared by overriding {@link #getObjectsHoldingArbitraryContent()}
+ * declared by overriding {@link #getObjectsHoldingArbitraryContent()}.
*/
public final void testUnknownObjectException() throws IOException {
- String validQuery = createTestQueryBuilder().toString();
- unknownObjectExceptionTest(validQuery);
- for (String query : getAlternateVersions().keySet()) {
- unknownObjectExceptionTest(query);
+ Set<String> candidates = new HashSet<>();
+ // Adds the valid query to the list of queries to modify and test
+ candidates.add(createTestQueryBuilder().toString());
+ // Adds the alternate versions of the query too
+ candidates.addAll(getAlternateVersions().keySet());
+
+ List<Tuple<String, Boolean>> testQueries = alterateQueries(candidates, getObjectsHoldingArbitraryContent());
+ for (Tuple<String, Boolean> testQuery : testQueries) {
+ boolean expectedException = testQuery.v2();
+ try {
+ parseQuery(testQuery.v1());
+ if (expectedException) {
+ fail("some parsing exception expected for query: " + testQuery);
+ }
+ } catch (ParsingException | ElasticsearchParseException e) {
+ // different kinds of exception wordings depending on location
+ // of mutation, so no simple asserts possible here
+ if (expectedException == false) {
+ throw new AssertionError("unexpected exception when parsing query:\n" + testQuery, e);
+ }
+ } catch (IllegalArgumentException e) {
+ if (expectedException == false) {
+ throw new AssertionError("unexpected exception when parsing query:\n" + testQuery, e);
+ }
+ assertThat(e.getMessage(), containsString("unknown field [newField], parser not found"));
+ }
}
}
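Editor's note: a hedged sketch of declaring an arbitrary-content object in a concrete query test; the method signature is inferred from the call site above, and "script" is an illustrative marker name.

    @Override
    protected Set<String> getObjectsHoldingArbitraryContent() {
        // objects under a "script" field may hold arbitrary content, so
        // mutations inside them are not expected to throw a parsing exception
        return Collections.singleton("script");
    }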
/**
- * Traverses the json tree of the valid query provided as argument and mutates it by adding one object within each object
- * encountered. Every mutation is a separate iteration, which will be followed by its corresponding assertions to verify that
- * a parse exception is thrown when parsing the modified query. Some specific objects do not cause any exception as they can
- * hold arbitrary content; they can be declared by overriding {@link #getObjectsHoldingArbitraryContent()}, and for those we
- * will verify that no exception gets thrown instead.
+ * Traverses the json tree of the valid query provided as argument and mutates it one or more times by adding one object within each
+ * object encountered.
*
* For instance given the following valid term query:
* {
@@ -360,97 +380,81 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
* }
* }
* }
+ *
+ * Every mutation is then added to the list of results with a boolean flag indicating if a parsing exception is expected or not
+ * for the mutation. Some specific objects do not cause any exception as they can hold arbitrary content; they are passed using the
+ * arbitraryMarkers parameter.
*/
- private void unknownObjectExceptionTest(String validQuery) throws IOException {
- //TODO building json by concatenating strings makes the code unmaintainable, we should rewrite this test
- assertThat(validQuery, containsString("{"));
- int level = 0;
- //track whether we are within quotes as we may have randomly generated strings containing curly brackets
- boolean withinQuotes = false;
- boolean expectedException = true;
- int objectHoldingArbitraryContentLevel = 0;
- for (int insertionPosition = 0; insertionPosition < validQuery.length(); insertionPosition++) {
- if (validQuery.charAt(insertionPosition) == '"') {
- withinQuotes = withinQuotes == false;
- } else if (withinQuotes == false && validQuery.charAt(insertionPosition) == '}') {
- level--;
- if (expectedException == false) {
- //track where we are within the object that holds arbitrary content
- objectHoldingArbitraryContentLevel--;
- }
- if (objectHoldingArbitraryContentLevel == 0) {
- //reset the flag once we have traversed the whole object that holds arbitrary content
- expectedException = true;
- }
- } else if (withinQuotes == false && validQuery.charAt(insertionPosition) == '{') {
- //keep track of which level we are within the json so that we can properly close the additional object
- level++;
- //if we don't expect an exception, it means that we are within an object that can contain arbitrary content.
- //in that case we ignore the whole object including its children, no need to even check where we are.
- if (expectedException) {
- int startCurrentObjectName = -1;
- int endCurrentObjectName = -1;
- //look backwards for the current object name, to find out whether we expect an exception following its mutation
- for (int i = insertionPosition; i >= 0; i--) {
- if (validQuery.charAt(i) == '}') {
- break;
- } else if (validQuery.charAt(i) == '"') {
- if (endCurrentObjectName == -1) {
- endCurrentObjectName = i;
- } else if (startCurrentObjectName == -1) {
- startCurrentObjectName = i + 1;
- } else {
- break;
+ static List<Tuple<String, Boolean>> alterateQueries(Set<String> queries, Set<String> arbitraryMarkers) throws IOException {
+ List<Tuple<String, Boolean>> results = new ArrayList<>();
+
+ // Indicate if a part of the query can hold any arbitrary content
+ boolean hasArbitraryContent = (arbitraryMarkers != null && arbitraryMarkers.isEmpty() == false);
+
+ for (String query : queries) {
+ // Track the number of query mutations
+ int mutation = 0;
+
+ while (true) {
+ boolean expectException = true;
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ try (
+ XContentGenerator generator = XContentType.JSON.xContent().createGenerator(out);
+ XContentParser parser = XContentHelper.createParser(new BytesArray(query));
+ ) {
+ int objectIndex = -1;
+ Deque<String> levels = new LinkedList<>();
+
+ // Parse the valid query and insert a new object level called "newField"
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != null) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ objectIndex++;
+ levels.addLast(parser.currentName());
+
+ if (objectIndex == mutation) {
+ // We reached the place in the object tree where we want to insert a new object level
+ generator.writeStartObject();
+ generator.writeFieldName("newField");
+ XContentHelper.copyCurrentStructure(generator, parser);
+ generator.writeEndObject();
+
+ if (hasArbitraryContent) {
+ // The query has one or more fields that hold arbitrary content. If the current
+ // field is one (or a child) of those, no exception is expected when parsing the mutated query.
+ for (String marker : arbitraryMarkers) {
+ if (levels.contains(marker)) {
+ expectException = false;
+ break;
+ }
+ }
+ }
+
+ // Jump to next token
+ continue;
}
+ } else if (token == XContentParser.Token.END_OBJECT) {
+ levels.removeLast();
}
- }
- if (startCurrentObjectName >= 0 && endCurrentObjectName > 0) {
- String currentObjectName = validQuery.substring(startCurrentObjectName, endCurrentObjectName);
- expectedException = getObjectsHoldingArbitraryContent().contains(currentObjectName) == false;
- }
- }
- if (expectedException == false) {
- objectHoldingArbitraryContentLevel++;
- }
- //inject the start of the new object
- String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : ";
- String secondPart = validQuery.substring(insertionPosition);
- int currentLevel = level;
- boolean quotes = false;
- for (int i = 0; i < secondPart.length(); i++) {
- if (secondPart.charAt(i) == '"') {
- quotes = quotes == false;
- } else if (quotes == false && secondPart.charAt(i) == '{') {
- currentLevel++;
- } else if (quotes == false && secondPart.charAt(i) == '}') {
- currentLevel--;
- if (currentLevel == level) {
- //close the additional object in the right place
- testQuery += secondPart.substring(0, i - 1) + "}" + secondPart.substring(i);
- break;
- }
- }
- }
- try {
- parseQuery(testQuery);
- if (expectedException) {
- fail("some parsing exception expected for query: " + testQuery);
- }
- } catch (ParsingException | ElasticsearchParseException e) {
- // different kinds of exception wordings depending on location
- // of mutation, so no simple asserts possible here
- if (expectedException == false) {
- throw new AssertionError("unexpected exception when parsing query:\n" + testQuery, e);
+ // We are walking through the object tree, so we can safely copy the current node
+ XContentHelper.copyCurrentEvent(generator, parser);
}
- } catch(IllegalArgumentException e) {
- assertThat(e.getMessage(), containsString("unknown field [newField], parser not found"));
- if (expectedException == false) {
- throw new AssertionError("unexpected exception when parsing query:\n" + testQuery, e);
+
+ if (objectIndex < mutation) {
+ // We did not reach the insertion point; there are no more mutations to try
+ break;
+ } else {
+ // We reached the expected insertion point, so next time we'll try one step further
+ mutation++;
}
}
+
+ results.add(new Tuple<>(out.bytes().utf8ToString(), expectException));
}
}
+ return results;
}
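Editor's note: a hedged sketch of what alterateQueries produces for a simple query (the term query is illustrative). With no arbitrary-content markers supplied, every mutation is expected to fail parsing.

    Set<String> queries = Collections.singleton("{ \"term\" : { \"user\" : \"foo\" } }");
    List<Tuple<String, Boolean>> mutations = alterateQueries(queries, null);
    for (Tuple<String, Boolean> mutation : mutations) {
        assertTrue(mutation.v2()); // each mutation embeds a "newField" object somewhere new
    }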
/**
@@ -947,8 +951,8 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
* <li> Take a reference documentation example.
* <li> Stick it into the createParseableQueryJson method of the respective query test.
* <li> Manually check that what the QueryBuilder generates equals the input json ignoring default options.
- * <li> Put the manual checks into the asserQueryParsedFromJson method.
- * <li> Now copy the generated json including default options into createParseableQueryJso
+ * <li> Put the manual checks into the assertQueryParsedFromJson method.
+ * <li> Now copy the generated json including default options into createParseableQueryJson
* <li> By now the roundtrip check for the json should be happy.
* </ul>
**/
@@ -1005,7 +1009,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
private static class ServiceHolder implements Closeable {
- private final Injector injector;
private final IndicesQueriesRegistry indicesQueriesRegistry;
private final IndexFieldDataService indexFieldDataService;
private final SearchModule searchModule;
@@ -1016,18 +1019,14 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
private final MapperService mapperService;
private final BitsetFilterCache bitsetFilterCache;
private final ScriptService scriptService;
+ private final Client client;
ServiceHolder(Settings nodeSettings, Settings indexSettings,
Collection<Class<? extends Plugin>> plugins, AbstractQueryTestCase<?> testCase) throws IOException {
- final ThreadPool threadPool = new ThreadPool(nodeSettings);
- ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
- ClusterServiceUtils.setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(
- new MetaData.Builder().put(new IndexMetaData.Builder(
- index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0))));
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, null);
PluginsService pluginsService = new PluginsService(nodeSettings, env.modulesFile(), env.pluginsFile(), plugins);
- final Client proxy = (Client) Proxy.newProxyInstance(
+ client = (Client) Proxy.newProxyInstance(
Client.class.getClassLoader(),
new Class[]{Client.class},
clientInvocationHandler);
@@ -1036,53 +1035,24 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
scriptSettings.addAll(pluginsService.getPluginSettings());
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(nodeSettings, scriptSettings, pluginsService.getPluginSettingsFilter());
- searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)) {
- @Override
- protected void configureSearch() {
- // Skip me
- }
- };
- IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)) {
- @Override
- public void configure() {
- // skip services
- bindMapperExtension();
- }
- };
+ searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class));
+ IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
- ModulesBuilder modulesBuilder = new ModulesBuilder();
- for (Module pluginModule : pluginsService.createGuiceModules()) {
- modulesBuilder.add(pluginModule);
- }
- modulesBuilder.add(
- b -> {
- b.bind(PluginsService.class).toInstance(pluginsService);
- b.bind(Environment.class).toInstance(new Environment(nodeSettings));
- b.bind(ThreadPool.class).toInstance(threadPool);
- b.bind(Client.class).toInstance(proxy);
- b.bind(ClusterService.class).toProvider(Providers.of(clusterService));
- b.bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
- b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
- },
- settingsModule, indicesModule, searchModule, new IndexSettingsModule(index, indexSettings)
- );
- pluginsService.processModules(modulesBuilder);
- injector = modulesBuilder.createInjector();
- IndexScopedSettings indexScopedSettings = injector.getInstance(IndexScopedSettings.class);
+ IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings();
idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings);
AnalysisModule analysisModule = new AnalysisModule(new Environment(nodeSettings), emptyList());
- AnalysisService analysisService = analysisModule.getAnalysisRegistry().build(idxSettings);
+ IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
scriptService = scriptModule.getScriptService();
similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
- MapperRegistry mapperRegistry = injector.getInstance(MapperRegistry.class);
- mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, this::createShardContext);
+ MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
+ mapperService = new MapperService(idxSettings, indexAnalyzers, similarityService, mapperRegistry, this::createShardContext);
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
});
indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache,
- injector.getInstance(CircuitBreakerService.class), mapperService);
+ new NoneCircuitBreakerService(), mapperService);
bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() {
@Override
public void onCache(ShardId shardId, Accountable accountable) {
@@ -1094,7 +1064,10 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
}
});
- indicesQueriesRegistry = injector.getInstance(IndicesQueriesRegistry.class);
+ indicesQueriesRegistry = searchModule.getQueryParserRegistry();
+
+ String geoFieldMapping = (idxSettings.getIndexVersionCreated().before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) ?
+ LEGACY_GEO_POINT_FIELD_MAPPING : "type=geo_point";
for (String type : currentTypes) {
mapperService.merge(type, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(type,
@@ -1105,7 +1078,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object",
- GEO_POINT_FIELD_NAME, GEO_POINT_FIELD_MAPPING,
+ GEO_POINT_FIELD_NAME, geoFieldMapping,
GEO_SHAPE_FIELD_NAME, "type=geo_shape"
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
// also add mappings for two inner field in the object field
@@ -1115,24 +1088,17 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
MapperService.MergeReason.MAPPING_UPDATE, false);
}
testCase.initializeAdditionalMappings(mapperService);
- this.namedWriteableRegistry = injector.getInstance(NamedWriteableRegistry.class);
+ this.namedWriteableRegistry = namedWriteableRegistry;
}
@Override
public void close() throws IOException {
- injector.getInstance(ClusterService.class).close();
- try {
- terminate(injector.getInstance(ThreadPool.class));
- } catch (InterruptedException e) {
- IOUtils.reThrow(e);
- }
}
QueryShardContext createShardContext() {
ClusterState state = ClusterState.builder(new ClusterName("_name")).build();
- Client client = injector.getInstance(Client.class);
return new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService,
- scriptService, indicesQueriesRegistry, client, null, state);
+ scriptService, indicesQueriesRegistry, this.client, null, state);
}
ScriptModule createScriptModule(List<ScriptPlugin> scriptPlugins) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
index 4440fbe117..3c5f105e4d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
@@ -20,19 +20,24 @@ package org.elasticsearch.test;/*
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.junit.Assert;
import java.io.IOException;
import java.util.Random;
+import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
@@ -45,17 +50,18 @@ import static org.hamcrest.Matchers.equalTo;
public class BackgroundIndexer implements AutoCloseable {
- private final ESLogger logger = Loggers.getLogger(getClass());
+ private final Logger logger = Loggers.getLogger(getClass());
final Thread[] writers;
final CountDownLatch stopLatch;
final CopyOnWriteArrayList<Exception> failures;
final AtomicBoolean stop = new AtomicBoolean(false);
final AtomicLong idGenerator = new AtomicLong();
- final AtomicLong indexCounter = new AtomicLong();
final CountDownLatch startLatch = new CountDownLatch(1);
final AtomicBoolean hasBudget = new AtomicBoolean(false); // when set to true, writers will acquire writes from a semaphore
final Semaphore availableBudget = new Semaphore(0);
+ final boolean useAutoGeneratedIDs;
+ private final Set<String> ids = ConcurrentCollections.newConcurrentSet();
volatile int minFieldSize = 10;
volatile int maxFieldSize = 140;
@@ -116,6 +122,7 @@ public class BackgroundIndexer implements AutoCloseable {
if (random == null) {
random = RandomizedTest.getRandom();
}
+ useAutoGeneratedIDs = random.nextBoolean();
failures = new CopyOnWriteArrayList<>();
writers = new Thread[writerCount];
stopLatch = new CountDownLatch(writers.length);
@@ -145,12 +152,17 @@ public class BackgroundIndexer implements AutoCloseable {
BulkRequestBuilder bulkRequest = client.prepareBulk();
for (int i = 0; i < batchSize; i++) {
id = idGenerator.incrementAndGet();
- bulkRequest.add(client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)));
+ if (useAutoGeneratedIDs) {
+ bulkRequest.add(client.prepareIndex(index, type).setSource(generateSource(id, threadRandom)));
+ } else {
+ bulkRequest.add(client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)));
+ }
}
BulkResponse bulkResponse = bulkRequest.get();
for (BulkItemResponse bulkItemResponse : bulkResponse) {
if (!bulkItemResponse.isFailed()) {
- indexCounter.incrementAndGet();
+ boolean add = ids.add(bulkItemResponse.getId());
+ assert add : "ID: " + bulkItemResponse.getId() + " already used";
} else {
throw new ElasticsearchException("bulk request failure, id: ["
+ bulkItemResponse.getFailure().getId() + "] message: " + bulkItemResponse.getFailure().getMessage());
@@ -164,14 +176,24 @@ public class BackgroundIndexer implements AutoCloseable {
continue;
}
id = idGenerator.incrementAndGet();
- client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get();
- indexCounter.incrementAndGet();
+ if (useAutoGeneratedIDs) {
+ IndexResponse indexResponse = client.prepareIndex(index, type).setSource(generateSource(id, threadRandom)).get();
+ boolean add = ids.add(indexResponse.getId());
+ assert add : "ID: " + indexResponse.getId() + " already used";
+ } else {
+ IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get();
+ boolean add = ids.add(indexResponse.getId());
+ assert add : "ID: " + indexResponse.getId() + " already used";
+ }
}
}
- logger.info("**** done indexing thread {} stop: {} numDocsIndexed: {}", indexerId, stop.get(), indexCounter.get());
+ logger.info("**** done indexing thread {} stop: {} numDocsIndexed: {}", indexerId, stop.get(), ids.size());
} catch (Exception e) {
failures.add(e);
- logger.warn("**** failed indexing thread {} on doc id {}", e, indexerId, id);
+ final long docId = id;
+ logger.warn(
+ (Supplier<?>)
+ () -> new ParameterizedMessage("**** failed indexing thread {} on doc id {}", indexerId, docId), e);
} finally {
stopLatch.countDown();
}
@@ -259,7 +281,7 @@ public class BackgroundIndexer implements AutoCloseable {
}
public long totalIndexedDocs() {
- return indexCounter.get();
+ return ids.size();
}
public Throwable[] getFailures() {
@@ -284,4 +306,11 @@ public class BackgroundIndexer implements AutoCloseable {
public void close() throws Exception {
stop();
}
+
+ /**
+ * Returns the set of IDs of all documents indexed during this run of the indexer
+ */
+ public Set<String> getIds() {
+ return this.ids;
+ }
}
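The BackgroundIndexer change above replaces the plain counter with a concurrent ID set, which both counts documents and asserts that no ID is ever reported twice, even when IDs are auto-generated. A minimal sketch of that pattern using plain JDK collections (the class and method names here are illustrative, not part of the framework):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class IdTracker {
    // one entry per successfully indexed document; add() returning false
    // means the same ID was reported twice, which the indexer treats as a bug
    private final Set<String> ids = ConcurrentHashMap.newKeySet();

    void onIndexed(String id) {
        boolean added = ids.add(id);
        assert added : "ID: " + id + " already used";
    }

    long totalIndexedDocs() {
        return ids.size(); // replaces the old AtomicLong counter
    }
}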
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
index a6d35930e6..38682239b7 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
@@ -42,11 +42,16 @@ import static junit.framework.TestCase.fail;
public class ClusterServiceUtils {
public static ClusterService createClusterService(ThreadPool threadPool) {
+ DiscoveryNode discoveryNode = new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
+ return createClusterService(threadPool, discoveryNode);
+ }
+
+ public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode) {
ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool);
- clusterService.setLocalNode(new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
- new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())),Version.CURRENT));
+ clusterService.setLocalNode(localNode);
clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {
@Override
public void connectToAddedNodes(ClusterChangedEvent event) {
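A hedged usage sketch of the new overload above: callers that need a non-default local node can construct their own DiscoveryNode and pass it in, while the no-argument variant keeps the old all-roles default. The node name and role below are illustrative.

DiscoveryNode dataOnlyNode = new DiscoveryNode("data_node", LocalTransportAddress.buildUnique(),
        Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, dataOnlyNode);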
diff --git a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
index 916adc142c..df306dfc9e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
@@ -19,13 +19,13 @@
package org.elasticsearch.test;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.io.IOException;
@@ -44,7 +44,7 @@ import static org.junit.Assert.assertTrue;
public final class CorruptionUtils {
- private static ESLogger logger = ESLoggerFactory.getLogger("test");
+ private static Logger logger = ESLoggerFactory.getLogger("test");
private CorruptionUtils() {}
/**
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index e723f970bb..645801b316 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -100,7 +100,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
@@ -368,14 +368,14 @@ public abstract class ESIntegTestCase extends ESTestCase {
// TODO move settings for random directory etc here into the index based randomized settings.
if (cluster().size() > 0) {
Settings.Builder randomSettingsBuilder =
- setRandomIndexSettings(random(), Settings.builder());
+ setRandomIndexSettings(random(), Settings.builder());
if (isInternalCluster()) {
// this is only used by mock plugins and if the cluster is not internal we just can't set it
randomSettingsBuilder.put(INDEX_TEST_SEED_SETTING.getKey(), random().nextLong());
}
randomSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards())
- .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas());
+ .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas());
// if the test class is annotated with SuppressCodecs("*"), it means don't use lucene's codec randomization
// otherwise, use it, it has assertions and so on that can find bugs.
@@ -404,10 +404,10 @@ public abstract class ESIntegTestCase extends ESTestCase {
randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean());
}
PutIndexTemplateRequestBuilder putTemplate = client().admin().indices()
- .preparePutTemplate("random_index_template")
- .setTemplate("*")
- .setOrder(0)
- .setSettings(randomSettingsBuilder);
+ .preparePutTemplate("random_index_template")
+ .setTemplate("*")
+ .setOrder(0)
+ .setSettings(randomSettingsBuilder);
if (mappings != null) {
logger.info("test using _default_ mappings: [{}]", mappings.bytes().utf8ToString());
putTemplate.addMapping("_default_", mappings);
@@ -443,7 +443,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) {
if (random.nextBoolean()) {
builder.put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(),
- random.nextBoolean() ? random.nextDouble() : random.nextBoolean());
+ random.nextBoolean() ? random.nextDouble() : random.nextBoolean());
}
switch (random.nextInt(4)) {
case 3:
@@ -525,9 +525,9 @@ public abstract class ESIntegTestCase extends ESTestCase {
if (currentClusterScope != Scope.TEST) {
MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData
- .persistentSettings().getAsMap().size(), equalTo(0));
+ .persistentSettings().getAsMap().size(), equalTo(0));
assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData
- .transientSettings().getAsMap().size(), equalTo(0));
+ .transientSettings().getAsMap().size(), equalTo(0));
}
ensureClusterSizeConsistency();
ensureClusterStateConsistency();
@@ -540,7 +540,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
@Override
public void run() {
assertThat("still having pending states: " + Strings.arrayToDelimitedString(zenDiscovery.pendingClusterStates(), "\n"),
- zenDiscovery.pendingClusterStates(), emptyArray());
+ zenDiscovery.pendingClusterStates(), emptyArray());
}
});
}
@@ -829,7 +829,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
String failMsg = sb.toString();
for (SearchHit hit : searchResponse.getHits().getHits()) {
sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType())
- .append("] id [").append(hit.id()).append("]");
+ .append("] id [").append(hit.id()).append("]");
}
logger.warn("{}", sb);
fail(failMsg);
@@ -873,7 +873,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) {
ClusterHealthResponse actionGet = client().admin().cluster()
- .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet();
if (actionGet.isTimedOut()) {
logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
fail("timed out waiting for green state");
@@ -895,12 +895,12 @@ public abstract class ESIntegTestCase extends ESTestCase {
* using the cluster health API.
*/
public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) {
- ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0);
+ ClusterHealthRequest request = Requests.clusterHealthRequest().waitForNoRelocatingShards(true);
if (status != null) {
request.waitForStatus(status);
}
ClusterHealthResponse actionGet = client().admin().cluster()
- .health(request).actionGet();
+ .health(request).actionGet();
if (actionGet.isTimedOut()) {
logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false));
@@ -945,7 +945,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
* @return the actual number of docs seen.
*/
public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit, @Nullable final BackgroundIndexer indexer)
- throws InterruptedException {
+ throws InterruptedException {
final AtomicLong lastKnownCount = new AtomicLong(-1);
long lastStartCount = -1;
BooleanSupplier testDocs = () -> {
@@ -988,8 +988,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
public void setMinimumMasterNodes(int n) {
assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
- Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), n))
- .get().isAcknowledged());
+ Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), n))
+ .get().isAcknowledged());
}
/**
@@ -997,7 +997,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
public ClusterHealthStatus ensureYellow(String... indices) {
ClusterHealthResponse actionGet = client().admin().cluster()
- .health(Requests.clusterHealthRequest(indices).waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
+ .health(Requests.clusterHealthRequest(indices).waitForNoRelocatingShards(true).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
if (actionGet.isTimedOut()) {
logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false));
@@ -1019,7 +1019,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
public void logSegmentsState(String... indices) throws Exception {
IndicesSegmentResponse segsRsp = client().admin().indices().prepareSegments(indices).get();
logger.debug("segments {} state: \n{}", indices.length == 0 ? "[_all]" : indices,
- segsRsp.toXContent(JsonXContent.contentBuilder().prettyPrint(), ToXContent.EMPTY_PARAMS).string());
+ segsRsp.toXContent(JsonXContent.contentBuilder().prettyPrint(), ToXContent.EMPTY_PARAMS).string());
}
/**
@@ -1102,16 +1102,16 @@ public abstract class ESIntegTestCase extends ESTestCase {
}
logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue);
ClusterHealthResponse clusterHealthResponse = client(viaNode).admin().cluster().prepareHealth()
- .setWaitForEvents(Priority.LANGUID)
- .setWaitForNodes(Integer.toString(nodeCount))
- .setTimeout(timeValue)
- .setLocal(local)
- .setWaitForRelocatingShards(0)
- .get();
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNodes(Integer.toString(nodeCount))
+ .setTimeout(timeValue)
+ .setLocal(local)
+ .setWaitForNoRelocatingShards(true)
+ .get();
if (clusterHealthResponse.isTimedOut()) {
ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get();
fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. last cluster state:\n"
- + stateResponse.getState().prettyPrint());
+ + stateResponse.getState().prettyPrint());
}
assertThat(clusterHealthResponse.isTimedOut(), is(false));
}
@@ -1204,7 +1204,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
protected final FlushResponse flush(String... indices) {
waitForRelocation();
- FlushResponse actionGet = client().admin().indices().prepareFlush(indices).setWaitIfOngoing(true).execute().actionGet();
+ FlushResponse actionGet = client().admin().indices().prepareFlush(indices).execute().actionGet();
for (ShardOperationFailedException failure : actionGet.getShardFailures()) {
assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
}
@@ -1234,7 +1234,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
protected final void enableAllocation(String... indices) {
client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(
- EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"
+ EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"
)).get();
}
@@ -1243,7 +1243,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
protected final void disableAllocation(String... indices) {
client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(
- EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"
+ EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"
)).get();
}
@@ -1357,7 +1357,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
}
} else {
List<List<IndexRequestBuilder>> partition = eagerPartition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE,
- Math.max(1, (int) (builders.size() * randomDouble()))));
+ Math.max(1, (int) (builders.size() * randomDouble()))));
logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size());
for (List<IndexRequestBuilder> segmented : partition) {
BulkRequestBuilder bulkBuilder = client().prepareBulk();
@@ -1426,18 +1426,18 @@ public abstract class ESIntegTestCase extends ESTestCase {
if (rarely()) {
if (rarely()) {
client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
- new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+ new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
} else if (maybeFlush && rarely()) {
if (randomBoolean()) {
client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
- new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+ new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
} else {
client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()),
new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
}
} else if (rarely()) {
client().admin().indices().prepareForceMerge(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute(
- new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+ new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
}
}
while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) {
@@ -1567,7 +1567,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
public void clearScroll(String... scrollIds) {
ClearScrollResponse clearResponse = client().prepareClearScroll()
- .setScrollIds(Arrays.asList(scrollIds)).get();
+ .setScrollIds(Arrays.asList(scrollIds)).get();
assertThat(clearResponse.isSucceeded(), equalTo(true));
}
@@ -1631,20 +1631,20 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/
protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder builder = Settings.builder()
- .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE)
- // Default the watermarks to absurdly low to prevent the tests
- // from failing on nodes without enough disk space
- .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")
- .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b")
- .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000)
- .put("script.stored", "true")
- .put("script.inline", "true")
- // by default we never cache below 10k docs in a segment,
- // bypass this limit so that caching gets some testing in
- // integration tests that usually create few documents
- .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0)
- // wait short time for other active shards before actually deleting, default 30s not needed in tests
- .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS));
+ .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE)
+ // Default the watermarks to absurdly low to prevent the tests
+ // from failing on nodes without enough disk space
+ .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")
+ .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b")
+ .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000)
+ .put("script.stored", "true")
+ .put("script.inline", "true")
+ // by default we never cache below 10k docs in a segment,
+ // bypass this limit so that caching gets some testing in
+ // integration tests that usually create few documents
+ .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0)
+ // wait short time for other active shards before actually deleting, default 30s not needed in tests
+ .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS));
return builder.build();
}
@@ -1739,8 +1739,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
mockPlugins = mocks;
}
return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, minNumDataNodes, maxNumDataNodes,
- InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
- InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper());
+ InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
+ InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper());
}
protected NodeConfigurationSource getNodeConfigSource() {
@@ -1772,7 +1772,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(),
isNetwork ? DiscoveryModule.DISCOVERY_TYPE_SETTING.getDefault(Settings.EMPTY) : "local")
.put(networkSettings.build()).
- put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build();
+ put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build();
}
@Override
@@ -2064,15 +2064,15 @@ public abstract class ESIntegTestCase extends ESTestCase {
}
throw new IllegalStateException(builder.toString());
}
- Path src = list[0];
- Path dest = dataDir.resolve(internalCluster().getClusterName());
+ Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER);
+ Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER);
assertTrue(Files.exists(src));
Files.move(src, dest);
assertFalse(Files.exists(src));
assertTrue(Files.exists(dest));
Settings.Builder builder = Settings.builder()
- .put(settings)
- .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath());
+ .put(settings)
+ .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath());
Path configDir = indexDir.resolve("config");
if (Files.exists(configDir)) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index 98d5e0d339..4916d7df2f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -28,7 +28,6 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -42,6 +41,7 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.internal.SearchContext;
@@ -59,7 +59,6 @@ import java.util.Collections;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
/**
@@ -186,7 +185,11 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
.put(nodeSettings()) // allow test cases to provide their own settings or override these
.build();
Node build = new MockNode(settings, getPlugins());
- build.start();
+ try {
+ build.start();
+ } catch (NodeValidationException e) {
+ throw new RuntimeException(e);
+ }
return build;
}
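A sketch of the wrapping pattern above (the helper name is hypothetical): Node#start() now declares the checked NodeValidationException, so test helpers convert it to an unchecked failure instead of widening their own signatures.

private static Node startNode(Node node) {
    try {
        node.start();
    } catch (NodeValidationException e) {
        // validation failures are bugs in test setup, so fail fast and unchecked
        throw new RuntimeException(e);
    }
    return node;
}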
@@ -259,7 +262,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
// Wait for the index to be allocated so that cluster state updates don't override
// changes that would have been done locally
ClusterHealthResponse health = client().admin().cluster()
- .health(Requests.clusterHealthRequest(index).waitForYellowStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ .health(Requests.clusterHealthRequest(index).waitForYellowStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet();
assertThat(health.getStatus(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW));
assertThat("Cluster must be a single node cluster", health.getNumberOfDataNodes(), equalTo(1));
IndicesService instanceFromNode = getInstanceFromNode(IndicesService.class);
@@ -302,7 +305,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
*/
public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) {
ClusterHealthResponse actionGet = client().admin().cluster()
- .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet();
if (actionGet.isTimedOut()) {
logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index 784a0a4bed..799fdbf894 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -29,7 +29,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter;
-
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@@ -43,8 +43,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.io.PathUtilsForTesting;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.MockBigArrays;
@@ -57,14 +55,18 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.analysis.CharFilterFactory;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.AnalysisModule;
-import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.plugins.AnalysisPlugin;
+import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
@@ -130,10 +132,17 @@ import static org.hamcrest.Matchers.equalTo;
public abstract class ESTestCase extends LuceneTestCase {
static {
+ System.setProperty("log4j.shutdownHookEnabled", "false");
+ // we cannot shut down logging while tests are running, or the next test that runs within the
+ // same JVM will try to initialize logging after a security manager has been installed,
+ // and this will fail
+ System.setProperty("es.log4j.shutdownEnabled", "false");
+ System.setProperty("log4j2.disable.jmx", "true");
+ System.setProperty("log4j.skipJansi", "true"); // jython has this crazy shaded Jansi version that log4j2 tries to load
BootstrapForTesting.ensureInitialized();
}
- protected final ESLogger logger = Loggers.getLogger(getClass());
+ protected final Logger logger = Loggers.getLogger(getClass());
// -----------------------------------------------------------------
// Suite and test case setup/cleanup.
@@ -298,6 +307,14 @@ public abstract class ESTestCase extends LuceneTestCase {
return random().nextInt();
}
+ public static long randomPositiveLong() {
+ long randomLong;
+ do {
+ randomLong = randomLong();
+ } while (randomLong == Long.MIN_VALUE);
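+        // Long.MIN_VALUE is rejected because Math.abs(Long.MIN_VALUE) overflows back to a
+        // negative value; note the result may still be 0, i.e. non-negative rather than strictly positive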
+ return Math.abs(randomLong);
+ }
+
public static float randomFloat() {
return random().nextFloat();
}
@@ -797,35 +814,37 @@ public abstract class ESTestCase extends LuceneTestCase {
}
/**
- * Creates an AnalysisService with all the default analyzers configured.
+ * Creates a TestAnalysis with all the default analyzers configured.
*/
- public static AnalysisService createAnalysisService(Index index, Settings settings, AnalysisPlugin... analysisPlugins)
+ public static TestAnalysis createTestAnalysis(Index index, Settings settings, AnalysisPlugin... analysisPlugins)
throws IOException {
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
- return createAnalysisService(index, nodeSettings, settings, analysisPlugins);
+ return createTestAnalysis(index, nodeSettings, settings, analysisPlugins);
}
/**
- * Creates an AnalysisService with all the default analyzers configured.
+ * Creates a TestAnalysis with all the default analyzers configured.
*/
- public static AnalysisService createAnalysisService(Index index, Settings nodeSettings, Settings settings,
- AnalysisPlugin... analysisPlugins) throws IOException {
+ public static TestAnalysis createTestAnalysis(Index index, Settings nodeSettings, Settings settings,
+ AnalysisPlugin... analysisPlugins) throws IOException {
Settings indexSettings = Settings.builder().put(settings)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build();
- return createAnalysisService(IndexSettingsModule.newIndexSettings(index, indexSettings), nodeSettings, analysisPlugins);
+ return createTestAnalysis(IndexSettingsModule.newIndexSettings(index, indexSettings), nodeSettings, analysisPlugins);
}
/**
- * Creates an AnalysisService with all the default analyzers configured.
+ * Creates a TestAnalysis with all the default analyzers configured.
*/
- public static AnalysisService createAnalysisService(IndexSettings indexSettings, Settings nodeSettings,
- AnalysisPlugin... analysisPlugins) throws IOException {
+ public static TestAnalysis createTestAnalysis(IndexSettings indexSettings, Settings nodeSettings,
+ AnalysisPlugin... analysisPlugins) throws IOException {
Environment env = new Environment(nodeSettings);
AnalysisModule analysisModule = new AnalysisModule(env, Arrays.asList(analysisPlugins));
- final AnalysisService analysisService = analysisModule.getAnalysisRegistry()
- .build(indexSettings);
- return analysisService;
+ AnalysisRegistry analysisRegistry = analysisModule.getAnalysisRegistry();
+ return new TestAnalysis(analysisRegistry.build(indexSettings),
+ analysisRegistry.buildTokenFilterFactories(indexSettings),
+ analysisRegistry.buildTokenizerFactories(indexSettings),
+ analysisRegistry.buildCharFilterFactories(indexSettings));
}
public static ScriptModule newTestScriptModule() {
@@ -855,4 +874,27 @@ public abstract class ESTestCase extends LuceneTestCase {
}
));
}
+
+ /**
+ * This cute helper class just holds all analysis building blocks that are used
+ * to build IndexAnalyzers. This is only for testing since in production we only need the
+ * result and we don't even expose it there.
+ */
+ public static final class TestAnalysis {
+
+ public final IndexAnalyzers indexAnalyzers;
+ public final Map<String, TokenFilterFactory> tokenFilter;
+ public final Map<String, TokenizerFactory> tokenizer;
+ public final Map<String, CharFilterFactory> charFilter;
+
+ public TestAnalysis(IndexAnalyzers indexAnalyzers,
+ Map<String, TokenFilterFactory> tokenFilter,
+ Map<String, TokenizerFactory> tokenizer,
+ Map<String, CharFilterFactory> charFilter) {
+ this.indexAnalyzers = indexAnalyzers;
+ this.tokenFilter = tokenFilter;
+ this.tokenizer = tokenizer;
+ this.charFilter = charFilter;
+ }
+ }
}
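A hedged usage sketch of the replacement API above: tests now pull individual analysis components from the returned TestAnalysis holder instead of an AnalysisService. The index name and component keys below are illustrative.

ESTestCase.TestAnalysis analysis =
        createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY);
TokenFilterFactory lowercase = analysis.tokenFilter.get("lowercase");
TokenizerFactory whitespace = analysis.tokenizer.get("whitespace");
CharFilterFactory htmlStrip = analysis.charFilter.get("html_strip");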
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
index 1af9fa5ba7..c4bd964365 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
@@ -43,6 +43,11 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
public abstract class ESTokenStreamTestCase extends BaseTokenStreamTestCase {
static {
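+        // assumption: loading ESTestCase here forces its static initializer (which sets
+        // the log4j system properties) to run before BootstrapForTesting initializes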
+ try {
+ Class.forName("org.elasticsearch.test.ESTestCase");
+ } catch (ClassNotFoundException e) {
+ throw new AssertionError(e);
+ }
BootstrapForTesting.ensureInitialized();
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
index cde4e5f6ac..2e8001bf0f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.test;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
@@ -27,7 +28,6 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -62,7 +62,7 @@ final class ExternalNode implements Closeable {
private final String clusterName;
private TransportClient client;
- private final ESLogger logger = Loggers.getLogger(getClass());
+ private final Logger logger = Loggers.getLogger(getClass());
private Settings externalNodeSettings;
@@ -109,7 +109,6 @@ final class ExternalNode implements Closeable {
case "path.home":
case NetworkModule.TRANSPORT_TYPE_KEY:
case "discovery.type":
- case NetworkModule.TRANSPORT_SERVICE_TYPE_KEY:
case "config.ignore_system_properties":
continue;
default:
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
index 6f6ac8488d..adab3b7045 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
@@ -19,6 +19,7 @@
package org.elasticsearch.test;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@@ -27,7 +28,6 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.breaker.CircuitBreaker;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -56,7 +56,7 @@ import static org.junit.Assert.assertThat;
*/
public final class ExternalTestCluster extends TestCluster {
- private static final ESLogger logger = Loggers.getLogger(ExternalTestCluster.class);
+ private static final Logger logger = Loggers.getLogger(ExternalTestCluster.class);
private static final AtomicInteger counter = new AtomicInteger();
public static final String EXTERNAL_CLUSTER_PREFIX = "external_";
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
index f46069484d..a5b1667a13 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
@@ -21,7 +21,6 @@ package org.elasticsearch.test;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
-import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.plugins.Plugin;
import java.util.Arrays;
@@ -31,6 +30,8 @@ public final class InternalSettingsPlugin extends Plugin {
public static final Setting<Integer> VERSION_CREATED =
Setting.intSetting("index.version.created", 0, Property.IndexScope, Property.NodeScope);
+ public static final Setting<String> PROVIDED_NAME_SETTING =
Setting.simpleString("index.provided_name", Property.IndexScope, Property.NodeScope);
public static final Setting<Boolean> MERGE_ENABLED =
Setting.boolSetting("index.merge.enabled", true, Property.IndexScope, Property.NodeScope);
public static final Setting<Long> INDEX_CREATION_DATE_SETTING =
@@ -38,6 +39,7 @@ public final class InternalSettingsPlugin extends Plugin {
@Override
public List<Setting<?>> getSettings() {
- return Arrays.asList(VERSION_CREATED, MERGE_ENABLED, INDEX_CREATION_DATE_SETTING);
+ return Arrays.asList(VERSION_CREATED, MERGE_ENABLED,
+ INDEX_CREATION_DATE_SETTING, PROVIDED_NAME_SETTING);
}
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 1dd1c5d9b6..6a5493ff1e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -24,6 +24,7 @@ import com.carrotsearch.randomizedtesting.SysGlobals;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
@@ -51,7 +52,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -83,6 +83,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ScriptService;
@@ -147,7 +148,7 @@ import static org.junit.Assert.fail;
*/
public final class InternalTestCluster extends TestCluster {
- private final ESLogger logger = Loggers.getLogger(getClass());
+ private final Logger logger = Loggers.getLogger(getClass());
/**
* The number of ports in the range used for this JVM
@@ -825,7 +826,11 @@ public final class InternalTestCluster extends TestCluster {
}
void startNode() {
- node.start();
+ try {
+ node.start();
+ } catch (NodeValidationException e) {
+ throw new RuntimeException(e);
+ }
}
void closeNode() throws IOException {
@@ -1655,10 +1660,18 @@ public final class InternalTestCluster extends TestCluster {
}
public void clearDisruptionScheme() {
+ clearDisruptionScheme(true);
+ }
+
+ public void clearDisruptionScheme(boolean ensureHealthyCluster) {
if (activeDisruptionScheme != null) {
TimeValue expectedHealingTime = activeDisruptionScheme.expectedTimeToHeal();
logger.info("Clearing active scheme {}, expected healing time {}", activeDisruptionScheme, expectedHealingTime);
- activeDisruptionScheme.removeAndEnsureHealthy(this);
+ if (ensureHealthyCluster) {
+ activeDisruptionScheme.removeAndEnsureHealthy(this);
+ } else {
+ activeDisruptionScheme.removeFromCluster(this);
+ }
}
activeDisruptionScheme = null;
}
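Usage sketch for the new overload above (internalCluster() as in ESIntegTestCase): tests that stop the cluster right after the disruption can skip the healing check.

internalCluster().clearDisruptionScheme();      // old behavior: remove the scheme and ensure the cluster heals
internalCluster().clearDisruptionScheme(false); // new: remove only, e.g. right before a full shutdown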
diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java
index e1967256dd..fe46251e3e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java
@@ -19,6 +19,7 @@
package org.elasticsearch.test;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
@@ -31,7 +32,6 @@ import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.IndexFolderUpgrader;
@@ -61,7 +61,7 @@ import static org.junit.Assert.assertEquals;
public class OldIndexUtils {
- public static List<String> loadIndexesList(String prefix, Path bwcIndicesPath) throws IOException {
+ public static List<String> loadDataFilesList(String prefix, Path bwcIndicesPath) throws IOException {
List<String> indexes = new ArrayList<>();
try (DirectoryStream<Path> stream = Files.newDirectoryStream(bwcIndicesPath, prefix + "-*.zip")) {
for (Path path : stream) {
@@ -86,7 +86,7 @@ public class OldIndexUtils {
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment);
}
- public static void loadIndex(String indexName, String indexFile, Path unzipDir, Path bwcPath, ESLogger logger, Path... paths) throws
+ public static void loadIndex(String indexName, String indexFile, Path unzipDir, Path bwcPath, Logger logger, Path... paths) throws
Exception {
Path unzipDataDir = unzipDir.resolve("data");
@@ -128,7 +128,7 @@ public class OldIndexUtils {
}
// randomly distribute the files from src over the destination paths
- public static void copyIndex(final ESLogger logger, final Path src, final String indexName, final Path... dests) throws IOException {
+ public static void copyIndex(final Logger logger, final Path src, final String indexName, final Path... dests) throws IOException {
Path destinationDataPath = dests[randomInt(dests.length - 1)];
for (Path dest : dests) {
Path indexDir = dest.resolve(indexName);
@@ -194,7 +194,7 @@ public class OldIndexUtils {
}
public static boolean isUpgraded(Client client, String index) throws Exception {
- ESLogger logger = Loggers.getLogger(OldIndexUtils.class);
+ Logger logger = Loggers.getLogger(OldIndexUtils.class);
int toUpgrade = 0;
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes());
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
index 2629f655c9..124960fe92 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
@@ -20,12 +20,12 @@
package org.elasticsearch.test;
import com.carrotsearch.hppc.ObjectArrayList;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexTemplateMissingException;
@@ -45,7 +45,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
*/
public abstract class TestCluster implements Closeable {
- protected final ESLogger logger = Loggers.getLogger(getClass());
+ protected final Logger logger = Loggers.getLogger(getClass());
private final long seed;
protected Random random;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
index df35b9c345..c6a1f64820 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
@@ -18,10 +18,6 @@
*/
package org.elasticsearch.test;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Query;
@@ -31,7 +27,6 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
@@ -43,13 +38,14 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchExtBuilder;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSearchResult;
-import org.elasticsearch.search.fetch.FetchSubPhase;
-import org.elasticsearch.search.fetch.FetchSubPhaseContext;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
+import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight;
@@ -65,6 +61,10 @@ import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.threadpool.ThreadPool;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
public class TestSearchContext extends SearchContext {
final BigArrays bigArrays;
@@ -89,7 +89,7 @@ public class TestSearchContext extends SearchContext {
private SearchContextAggregations aggregations;
private final long originNanoTime = System.nanoTime();
- private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>();
+ private final Map<String, SearchExtBuilder> searchExtBuilders = new HashMap<>();
public TestSearchContext(ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService, IndexService indexService) {
super(ParseFieldMatcher.STRICT);
@@ -116,7 +116,7 @@ public class TestSearchContext extends SearchContext {
}
@Override
- public void preProcess() {
+ public void preProcess(boolean rewrite) {
}
@Override
@@ -196,12 +196,13 @@ public class TestSearchContext extends SearchContext {
}
@Override
- public <SubPhaseContext extends FetchSubPhaseContext> SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory<SubPhaseContext> contextFactory) {
- String subPhaseName = contextFactory.getName();
- if (subPhaseContexts.get(subPhaseName) == null) {
- subPhaseContexts.put(subPhaseName, contextFactory.newContextInstance());
- }
- return (SubPhaseContext) subPhaseContexts.get(subPhaseName);
+ public void addSearchExt(SearchExtBuilder searchExtBuilder) {
+ searchExtBuilders.put(searchExtBuilder.getWriteableName(), searchExtBuilder);
+ }
+
+ @Override
+ public SearchExtBuilder getSearchExt(String name) {
+ return searchExtBuilders.get(name);
}
@Override
@@ -262,6 +263,16 @@ public class TestSearchContext extends SearchContext {
}
@Override
+ public DocValueFieldsContext docValueFieldsContext() {
+ return null;
+ }
+
+ @Override
+ public SearchContext docValueFieldsContext(DocValueFieldsContext docValueFieldsContext) {
+ return null;
+ }
+
+ @Override
public ContextIndexSearcher searcher() {
return searcher;
}
@@ -284,9 +295,6 @@ public class TestSearchContext extends SearchContext {
}
@Override
- public AnalysisService analysisService() { return indexService.analysisService();}
-
- @Override
public SimilarityService similarityService() {
return null;
}
@@ -430,17 +438,28 @@ public class TestSearchContext extends SearchContext {
}
@Override
- public boolean hasFieldNames() {
+ public boolean hasStoredFields() {
return false;
}
@Override
- public List<String> fieldNames() {
+ public boolean hasStoredFieldsContext() {
+ return false;
+ }
+
+ @Override
+ public boolean storedFieldsRequested() {
+ return false;
+ }
+
+ @Override
+ public StoredFieldsContext storedFieldsContext() {
return null;
}
@Override
- public void emptyFieldNames() {
+ public SearchContext storedFieldsContext(StoredFieldsContext storedFieldsContext) {
+ return null;
}
@Override
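A sketch of the replacement registry above (the helper method is illustrative): arbitrary SearchExtBuilder instances are stored and retrieved by their writeable name, replacing the removed FetchSubPhaseContext factories.

void registerExt(TestSearchContext context, SearchExtBuilder extBuilder) {
    context.addSearchExt(extBuilder);
    // later, consumers look the builder up by the same key
    SearchExtBuilder found = context.getSearchExt(extBuilder.getWriteableName());
    assert found == extBuilder;
}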
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
index b210a20cf7..944ddb9b05 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
@@ -21,12 +21,16 @@ package org.elasticsearch.test.disruption;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.test.InternalTestCluster;
-import java.util.HashSet;
+import java.util.Arrays;
import java.util.Random;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
/**
* Suspends all threads on the specified node in order to simulate a long gc.
@@ -34,8 +38,8 @@ import java.util.regex.Pattern;
public class LongGCDisruption extends SingleNodeDisruption {
private static final Pattern[] unsafeClasses = new Pattern[]{
- // logging has shared JVM locks - we may suspend a thread and block other nodes from doing their thing
- Pattern.compile("Logger")
+ // logging has shared JVM locks - we may suspend a thread and block other nodes from doing their thing
+ Pattern.compile("logging\\.log4j")
};
protected final String disruptedNode;
@@ -49,13 +53,67 @@ public class LongGCDisruption extends SingleNodeDisruption {
@Override
public synchronized void startDisrupting() {
if (suspendedThreads == null) {
- suspendedThreads = new HashSet<>();
- stopNodeThreads(disruptedNode, suspendedThreads);
+ boolean success = false;
+ try {
+ suspendedThreads = ConcurrentHashMap.newKeySet();
+
+ final String currentThreadName = Thread.currentThread().getName();
+ assert currentThreadName.contains("[" + disruptedNode + "]") == false :
+ "current thread match pattern. thread name: " + currentThreadName + ", node: " + disruptedNode;
+            // we spawn a background thread to protect against deadlocks, which can happen
+            // if there are shared resources between the caller thread and the suspended threads;
+            // see unsafeClasses for how to avoid that
+ final AtomicReference<Exception> stoppingError = new AtomicReference<>();
+ final Thread stoppingThread = new Thread(new AbstractRunnable() {
+ @Override
+ public void onFailure(Exception e) {
+ stoppingError.set(e);
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ // keep trying to stop threads, until no new threads are discovered.
+ while (stopNodeThreads(disruptedNode, suspendedThreads)) {
+ if (Thread.interrupted()) {
+ return;
+ }
+ }
+ }
+ });
+ stoppingThread.setName(currentThreadName + "[LongGCDisruption][threadStopper]");
+ stoppingThread.start();
+ try {
+ stoppingThread.join(getStoppingTimeoutInMillis());
+ } catch (InterruptedException e) {
+ stoppingThread.interrupt(); // best effort to signal stopping
+ throw new RuntimeException(e);
+ }
+ if (stoppingError.get() != null) {
+ throw new RuntimeException("unknown error while stopping threads", stoppingError.get());
+ }
+ if (stoppingThread.isAlive()) {
+ logger.warn("failed to stop node [{}]'s threads within [{}] millis. Stopping thread stack trace:\n {}"
+ , disruptedNode, getStoppingTimeoutInMillis(), stackTrace(stoppingThread));
+ stoppingThread.interrupt(); // best effort;
+ throw new RuntimeException("stopping node threads took too long");
+ }
+ success = true;
+ } finally {
+ if (success == false) {
+ // resume threads if failed
+ resumeThreads(suspendedThreads);
+ suspendedThreads = null;
+ }
+ }
} else {
throw new IllegalStateException("can't disrupt twice, call stopDisrupting() first");
}
}
+ private String stackTrace(Thread thread) {
+ return Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"));
+ }
+
@Override
public synchronized void stopDisrupting() {
if (suspendedThreads != null) {
@@ -75,6 +133,13 @@ public class LongGCDisruption extends SingleNodeDisruption {
return TimeValue.timeValueMillis(0);
}
+ /**
+ * Resolves all threads belonging to the given node and suspends them if their current stack trace
+ * is "safe". Threads are added to nodeThreads if suspended.
+ *
+ * Returns true if some live threads were found. The caller is expected to call this method
+ * until no more "live" threads are found.
+ */
@SuppressWarnings("deprecation") // stops/resumes threads intentionally
@SuppressForbidden(reason = "stops/resumes threads intentionally")
protected boolean stopNodeThreads(String node, Set<Thread> nodeThreads) {
@@ -86,7 +151,7 @@ public class LongGCDisruption extends SingleNodeDisruption {
allThreads = null;
}
}
- boolean stopped = false;
+ boolean liveThreadsFound = false;
final String nodeThreadNamePart = "[" + node + "]";
for (Thread thread : allThreads) {
if (thread == null) {
@@ -95,14 +160,15 @@ public class LongGCDisruption extends SingleNodeDisruption {
String name = thread.getName();
if (name.contains(nodeThreadNamePart)) {
if (thread.isAlive() && nodeThreads.add(thread)) {
- stopped = true;
+ liveThreadsFound = true;
+ logger.trace("stopping thread [{}]", name);
thread.suspend();
// double check the thread is not in a shared resource like logging. If so, let it go and come back..
boolean safe = true;
safe:
for (StackTraceElement stackElement : thread.getStackTrace()) {
String className = stackElement.getClassName();
- for (Pattern unsafePattern : unsafeClasses) {
+ for (Pattern unsafePattern : getUnsafeClasses()) {
if (unsafePattern.matcher(className).find()) {
safe = false;
break safe;
@@ -110,13 +176,24 @@ public class LongGCDisruption extends SingleNodeDisruption {
}
}
if (!safe) {
+ logger.trace("resuming thread [{}] as it is in a critical section", name);
thread.resume();
nodeThreads.remove(thread);
}
}
}
}
- return stopped;
+ return liveThreadsFound;
+ }
+
+ // for testing
+ protected Pattern[] getUnsafeClasses() {
+ return unsafeClasses;
+ }
+
+ // for testing
+ protected long getStoppingTimeoutInMillis() {
+ return TimeValue.timeValueSeconds(30).getMillis();
}
@SuppressWarnings("deprecation") // stops/resumes threads intentionally
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruptionTest.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruptionTest.java
new file mode 100644
index 0000000000..3819044475
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruptionTest.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class LongGCDisruptionTest extends ESTestCase {
+
+ static class LockedExecutor {
+ ReentrantLock lock = new ReentrantLock();
+
+ public void executeLocked(Runnable r) {
+ lock.lock();
+ try {
+ r.run();
+ } finally {
+ lock.unlock();
+ }
+ }
+ }
+
+ public void testBlockingTimeout() throws Exception {
+ final String nodeName = "test_node";
+ LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) {
+ @Override
+ protected Pattern[] getUnsafeClasses() {
+ return new Pattern[]{
+ Pattern.compile(LockedExecutor.class.getSimpleName())
+ };
+ }
+
+ @Override
+ protected long getStoppingTimeoutInMillis() {
+ return 100;
+ }
+ };
+ final AtomicBoolean stop = new AtomicBoolean();
+ final CountDownLatch underLock = new CountDownLatch(1);
+ final CountDownLatch pauseUnderLock = new CountDownLatch(1);
+ final LockedExecutor lockedExecutor = new LockedExecutor();
+ final AtomicLong ops = new AtomicLong();
+ try {
+ Thread[] threads = new Thread[10];
+ for (int i = 0; i < 10; i++) {
+                // ensure at least one locked and one non-locked thread
+ final boolean lockedExec = (i < 9 && randomBoolean()) || i == 0;
+ threads[i] = new Thread(() -> {
+ while (stop.get() == false) {
+ if (lockedExec) {
+ lockedExecutor.executeLocked(() -> {
+ try {
+ underLock.countDown();
+ ops.incrementAndGet();
+ pauseUnderLock.await();
+ } catch (InterruptedException e) {
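+                                // swallowed: the outer loop re-checks the stop flag once the pause ends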
+
+ }
+ });
+ } else {
+ ops.incrementAndGet();
+ }
+ }
+ });
+ threads[i].setName("[" + nodeName + "][" + i + "]");
+ threads[i].start();
+ }
+ // make sure some threads are under lock
+ underLock.await();
+ RuntimeException e = expectThrows(RuntimeException.class, disruption::startDisrupting);
+ assertThat(e.getMessage(), containsString("stopping node threads took too long"));
+ } finally {
+ stop.set(true);
+ pauseUnderLock.countDown();
+ }
+ }
+
+ /**
+     * Checks that a GC disruption never blocks threads while they are doing something "unsafe",
+     * but does keep retrying until all threads can be safely paused.
+ */
+ public void testNotBlockingUnsafeStackTraces() throws Exception {
+ final String nodeName = "test_node";
+ LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) {
+ @Override
+ protected Pattern[] getUnsafeClasses() {
+ return new Pattern[]{
+ Pattern.compile(LockedExecutor.class.getSimpleName())
+ };
+ }
+ };
+ final AtomicBoolean stop = new AtomicBoolean();
+ final LockedExecutor lockedExecutor = new LockedExecutor();
+ final AtomicLong ops = new AtomicLong();
+ try {
+ Thread[] threads = new Thread[10];
+ for (int i = 0; i < 10; i++) {
+ threads[i] = new Thread(() -> {
+ for (int iter = 0; stop.get() == false; iter++) {
+ if (iter % 2 == 0) {
+ lockedExecutor.executeLocked(() -> {
+ Thread.yield(); // give some chance to catch this stack trace
+ ops.incrementAndGet();
+ });
+ } else {
+ Thread.yield(); // give some chance to catch this stack trace
+ ops.incrementAndGet();
+ }
+ }
+ });
+ threads[i].setName("[" + nodeName + "][" + i + "]");
+ threads[i].start();
+ }
+            // start disrupting; it must keep retrying until no thread is paused inside the locked executor
+ disruption.startDisrupting();
+ long first = ops.get();
+ assertThat(lockedExecutor.lock.isLocked(), equalTo(false)); // no threads should own the lock
+ Thread.sleep(100);
+ assertThat(ops.get(), equalTo(first));
+ disruption.stopDisrupting();
+ assertBusy(() -> assertThat(ops.get(), greaterThan(first)));
+ } finally {
+ stop.set(true);
+ }
+ }
+}
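Beyond these unit tests, the disruption is normally installed on a whole test cluster. A hedged sketch of that usage, assuming an ESIntegTestCase-style test where internalCluster() and setDisruptionScheme(...) are available:

    // sketch only: simulate a long GC pause on the current master node
    LongGCDisruption gcDisruption = new LongGCDisruption(random(), internalCluster().getMasterName());
    internalCluster().setDisruptionScheme(gcDisruption);
    gcDisruption.startDisrupting();   // suspends all threads of the chosen node
    // ... assert the rest of the cluster copes, e.g. elects a new master ...
    gcDisruption.stopDisrupting();    // resumes the suspended threads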
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java
index 40839f428e..f7094d8ae9 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java
@@ -20,7 +20,7 @@
package org.elasticsearch.test.disruption;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
@@ -45,7 +45,7 @@ import static org.junit.Assert.assertFalse;
*/
public class NetworkDisruption implements ServiceDisruptionScheme {
- private final ESLogger logger = Loggers.getLogger(NetworkDisruption.class);
+ private final Logger logger = Loggers.getLogger(NetworkDisruption.class);
private final DisruptedLinks disruptedLinks;
private final NetworkLinkDisruptionType networkLinkDisruptionType;
@@ -77,7 +77,7 @@ public class NetworkDisruption implements ServiceDisruptionScheme {
protected void ensureNodeCount(InternalTestCluster cluster) {
assertFalse("cluster failed to form after disruption was healed", cluster.client().admin().cluster().prepareHealth()
.setWaitForNodes("" + cluster.size())
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get().isTimedOut());
}
@@ -328,6 +328,18 @@ public class NetworkDisruption implements ServiceDisruptionScheme {
}
}
+ public static class IsolateAllNodes extends DisruptedLinks {
+
+ public IsolateAllNodes(Set<String> nodes) {
+ super(nodes);
+ }
+
+ @Override
+ public boolean disrupt(String node1, String node2) {
+ return true;
+ }
+ }
+
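A hedged usage sketch for the new IsolateAllNodes links; NetworkDisconnect is assumed to be one of the NetworkLinkDisruptionType implementations defined further down in this class:

    // sketch: disrupt every link between the given nodes
    Set<String> nodeNames = new HashSet<>(Arrays.asList("node_1", "node_2", "node_3")); // hypothetical names
    NetworkDisruption disruption = new NetworkDisruption(
            new NetworkDisruption.IsolateAllNodes(nodeNames),
            new NetworkDisruption.NetworkDisconnect());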
/**
* Abstract class representing various types of network disruptions. Instances of this class override the {@link #applyDisruption}
     * method to apply their specific disruption type to requests that are sent from a source to a target node.
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java
index 4d0f1123a1..edc261c175 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java
@@ -56,6 +56,21 @@ public class NetworkDisruptionTests extends ESTestCase {
assertTrue(topology.getMajoritySide().size() >= topology.getMinoritySide().size());
}
+ public void testIsolateAll() {
+ Set<String> nodes = generateRandomStringSet(1, 10);
+ NetworkDisruption.DisruptedLinks topology = new NetworkDisruption.IsolateAllNodes(nodes);
+ for (int i = 0; i < 10; i++) {
+ final String node1 = randomFrom(nodes);
+ final String node2 = randomFrom(nodes);
+ if (node1.equals(node2)) {
+ continue;
+ }
+ assertTrue(topology.nodes().contains(node1));
+ assertTrue(topology.nodes().contains(node2));
+ assertTrue(topology.disrupt(node1, node2));
+ }
+ }
+
public void testBridge() {
Set<String> partition1 = generateRandomStringSet(1, 10);
Set<String> partition2 = generateRandomStringSet(1, 10);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
index f74280c014..862e18d7ac 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.test.disruption;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.InternalTestCluster;
@@ -28,7 +28,7 @@ import static org.junit.Assert.assertFalse;
public abstract class SingleNodeDisruption implements ServiceDisruptionScheme {
- protected final ESLogger logger = Loggers.getLogger(getClass());
+ protected final Logger logger = Loggers.getLogger(getClass());
protected volatile String disruptedNode;
protected volatile InternalTestCluster cluster;
@@ -85,7 +85,7 @@ public abstract class SingleNodeDisruption implements ServiceDisruptionScheme {
protected void ensureNodeCount(InternalTestCluster cluster) {
assertFalse("cluster failed to form after disruption was healed", cluster.client().admin().cluster().prepareHealth()
.setWaitForNodes("" + cluster.size())
- .setWaitForRelocatingShards(0)
+ .setWaitForNoRelocatingShards(true)
.get().isTimedOut());
}
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
index a0f027bcbd..d977a21543 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
@@ -19,8 +19,8 @@
package org.elasticsearch.test.engine;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.IndexSearcher;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.ShardId;
@@ -35,12 +35,12 @@ class AssertingSearcher extends Engine.Searcher {
private RuntimeException firstReleaseStack;
private final Object lock = new Object();
private final int initialRefCount;
- private final ESLogger logger;
+ private final Logger logger;
private final AtomicBoolean closed = new AtomicBoolean(false);
AssertingSearcher(IndexSearcher indexSearcher, final Engine.Searcher wrappedSearcher,
ShardId shardId,
- ESLogger logger) {
+ Logger logger) {
super(wrappedSearcher.source(), indexSearcher);
// we only use the given index searcher here instead of the IS of the wrapped searcher. the IS might be a wrapped searcher
// with a wrapped reader.
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
index 304e304749..fbc4352b1e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.test.engine;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.IndexReader;
@@ -28,7 +29,6 @@ import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -66,7 +66,7 @@ public final class MockEngineSupport {
private final AtomicBoolean closing = new AtomicBoolean(false);
- private final ESLogger logger = Loggers.getLogger(Engine.class);
+ private final Logger logger = Loggers.getLogger(Engine.class);
private final ShardId shardId;
private final QueryCache filterCache;
private final QueryCachingPolicy filterCachingPolicy;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
index e321a98f37..b2b41b3146 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
@@ -19,12 +19,14 @@
package org.elasticsearch.test.gateway;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
+import java.util.List;
+
/**
* An allocator used for tests that doesn't do anything
*/
@@ -37,12 +39,12 @@ public class NoopGatewayAllocator extends GatewayAllocator {
}
@Override
- public void applyStartedShards(StartedRerouteAllocation allocation) {
+ public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
// noop
}
@Override
- public void applyFailedShards(FailedRerouteAllocation allocation) {
+ public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
// noop
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java
new file mode 100644
index 0000000000..5caf457127
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.gateway;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.AsyncShardFetch;
+import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.gateway.PrimaryShardAllocator;
+import org.elasticsearch.gateway.ReplicaShardAllocator;
+import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * A gateway allocator implementation that keeps an in-memory list of started shard allocations
+ * that are used as replies to the (normally async) fetch data requests. The in-memory list
+ * is adapted as shards are started and failed.
+ *
+ * Nodes leaving and joining the cluster do not change the list of shards the class tracks; rather,
+ * the current node set serves as a filter on what fetch data returns. Concretely, fetch data will
+ * only return shards that were started on nodes that are currently part of the cluster.
+ *
+ * For now only primary shard related data is fetched. Replica requests always get an empty response.
+ *
+ * This class is useful in unit tests that require the functionality of {@link GatewayAllocator} but do
+ * not have all the infrastructure required to use it.
+ */
+public class TestGatewayAllocator extends GatewayAllocator {
+
+ Map<String /* node id */, Map<ShardId, ShardRouting>> knownAllocations = new HashMap<>();
+ DiscoveryNodes currentNodes = DiscoveryNodes.EMPTY_NODES;
+
+ PrimaryShardAllocator primaryShardAllocator = new PrimaryShardAllocator(Settings.EMPTY) {
+ @Override
+ protected AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation) {
+ // for now always return immediately what we know
+ final ShardId shardId = shard.shardId();
+ final Set<String> ignoreNodes = allocation.getIgnoreNodes(shardId);
+ Map<DiscoveryNode, NodeGatewayStartedShards> foundShards = knownAllocations.values().stream()
+ .flatMap(shardMap -> shardMap.values().stream())
+ .filter(ks -> ks.shardId().equals(shardId))
+ .filter(ks -> ignoreNodes.contains(ks.currentNodeId()) == false)
+ .filter(ks -> currentNodes.nodeExists(ks.currentNodeId()))
+ .collect(Collectors.toMap(
+ routing -> currentNodes.get(routing.currentNodeId()),
+ routing ->
+ new NodeGatewayStartedShards(
+ currentNodes.get(routing.currentNodeId()), -1, routing.allocationId().getId(), routing.primary())));
+
+ return new AsyncShardFetch.FetchResult<>(shardId, foundShards, Collections.emptySet(), ignoreNodes);
+ }
+ };
+
+ ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator(Settings.EMPTY) {
+ @Override
+ protected AsyncShardFetch.FetchResult<NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation) {
+ // for now, just pretend no node has data
+ final ShardId shardId = shard.shardId();
+ return new AsyncShardFetch.FetchResult<>(shardId, Collections.emptyMap(), Collections.emptySet(),
+ allocation.getIgnoreNodes(shardId));
+ }
+ };
+
+ public TestGatewayAllocator() {
+ super(Settings.EMPTY, null, null);
+ }
+
+ @Override
+ public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
+ currentNodes = allocation.nodes();
+ allocation.routingNodes().shards(ShardRouting::active).forEach(this::addKnownAllocation);
+ }
+
+ @Override
+ public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
+ currentNodes = allocation.nodes();
+ for (FailedShard failedShard : failedShards) {
+ final ShardRouting failedRouting = failedShard.getRoutingEntry();
+ Map<ShardId, ShardRouting> nodeAllocations = knownAllocations.get(failedRouting.currentNodeId());
+ if (nodeAllocations != null) {
+ nodeAllocations.remove(failedRouting.shardId());
+ if (nodeAllocations.isEmpty()) {
+ knownAllocations.remove(failedRouting.currentNodeId());
+ }
+ }
+ }
+ }
+
+ @Override
+ public void allocateUnassigned(RoutingAllocation allocation) {
+ currentNodes = allocation.nodes();
+ innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator);
+ }
+
+ /**
+ * manually add a specific shard to the allocations the gateway keeps track of
+ */
+ public void addKnownAllocation(ShardRouting shard) {
+ knownAllocations.computeIfAbsent(shard.currentNodeId(), id -> new HashMap<>())
+ .put(shard.shardId(), shard);
+ }
+}
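A short, hedged usage sketch; the started ShardRouting and the surrounding allocation wiring are test-specific assumptions:

    // sketch: seed the allocator with a shard that is already started somewhere,
    // so fetchData can answer from the in-memory state during allocateUnassigned
    TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
    gatewayAllocator.addKnownAllocation(startedPrimary); // startedPrimary: a started ShardRouting built by the test
    // hand gatewayAllocator to the AllocationService under test in place of the real GatewayAllocator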
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
index e09cc7534e..d0e799d63f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
@@ -32,7 +32,6 @@ import static java.lang.annotation.ElementType.TYPE;
* It supports multiple logger:level comma separated key value pairs
* Use the _root keyword to set the root logger level
* e.g. @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE")
- * or just @TestLogging("_root:DEBUG,cluster.metadata:TRACE") since we start the test with -Des.logger.prefix=
*/
@Retention(RetentionPolicy.RUNTIME)
@Target({PACKAGE, TYPE, METHOD})
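With the short-prefix form removed, logger names in the annotation must be fully qualified; a hypothetical example:

    @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE")
    public class MetaDataIT extends ESIntegTestCase { /* hypothetical test class */ }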
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
index 8237095b49..0009c21d6a 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.test.junit.listeners;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.junit.annotations.TestLogging;
@@ -47,7 +47,8 @@ public class LoggingListener extends RunListener {
@Override
public void testRunStarted(Description description) throws Exception {
- previousPackageLoggingMap = processTestLogging(description.getTestClass().getPackage().getAnnotation(TestLogging.class));
+ Package testClassPackage = description.getTestClass().getPackage();
+ previousPackageLoggingMap = processTestLogging(testClassPackage != null ? testClassPackage.getAnnotation(TestLogging.class) : null);
previousClassLoggingMap = processTestLogging(description.getAnnotation(TestLogging.class));
}
@@ -68,7 +69,7 @@ public class LoggingListener extends RunListener {
previousLoggingMap = reset(previousLoggingMap);
}
- private static ESLogger resolveLogger(String loggerName) {
+ private static Logger resolveLogger(String loggerName) {
if (loggerName.equalsIgnoreCase("_root")) {
return ESLoggerFactory.getRootLogger();
}
@@ -82,9 +83,9 @@ public class LoggingListener extends RunListener {
}
Map<String, String> previousValues = new HashMap<>();
for (Map.Entry<String, String> entry : map.entrySet()) {
- ESLogger esLogger = resolveLogger(entry.getKey());
- previousValues.put(entry.getKey(), esLogger.getLevel());
- esLogger.setLevel(entry.getValue());
+ Logger logger = resolveLogger(entry.getKey());
+ previousValues.put(entry.getKey(), logger.getLevel().toString());
+ Loggers.setLevel(logger, entry.getValue());
}
return previousValues;
}
@@ -109,8 +110,8 @@ public class LoggingListener extends RunListener {
private Map<String, String> reset(Map<String, String> map) {
if (map != null) {
for (Map.Entry<String, String> previousLogger : map.entrySet()) {
- ESLogger esLogger = resolveLogger(previousLogger.getKey());
- esLogger.setLevel(previousLogger.getValue());
+ Logger logger = resolveLogger(previousLogger.getKey());
+ Loggers.setLevel(logger, previousLogger.getValue());
}
}
return null;
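The save/restore above reflects the log4j 2 API: levels are read from the Logger but written through the Loggers helper. The same pattern in isolation (logger name chosen arbitrarily):

    // sketch: remember the old level, change it, restore it later
    Logger logger = resolveLogger("org.elasticsearch.cluster");
    String previousLevel = logger.getLevel().toString();
    Loggers.setLevel(logger, "TRACE");
    // ... run the test ...
    Loggers.setLevel(logger, previousLevel);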
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
index 9243364955..93ad8bb1e9 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
@@ -19,8 +19,8 @@
package org.elasticsearch.test.junit.listeners;
import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESTestCase;
@@ -47,7 +47,7 @@ import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TE
*/
public class ReproduceInfoPrinter extends RunListener {
- protected final ESLogger logger = Loggers.getLogger(ESTestCase.class);
+ protected final Logger logger = Loggers.getLogger(ESTestCase.class);
@Override
public void testStarted(Description description) throws Exception {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index 573c301105..1e419faf06 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -55,6 +55,7 @@ import java.util.Set;
import javax.net.ssl.SSLContext;
+import static java.util.Collections.singletonMap;
import static java.util.Collections.sort;
import static java.util.Collections.unmodifiableList;
@@ -113,6 +114,7 @@ public class ESRestTestCase extends ESTestCase {
}
}
+
/**
* Clean up after the test case.
*/
@@ -137,23 +139,61 @@ public class ESRestTestCase extends ESTestCase {
return adminClient;
}
+ /**
+ * Returns whether to preserve the indices created during this test on completion of this test.
+ * Defaults to {@code false}. Override this method if indices should be preserved after the test,
+ * with the assumption that some other process or test will clean up the indices afterward.
+ * This is useful if the data directory and indices need to be preserved between test runs
+ * (for example, when testing rolling upgrades).
+ */
+ protected boolean preserveIndicesUponCompletion() {
+ return false;
+ }
+
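A subclass that needs its indices to survive, for example the old-cluster half of a rolling-upgrade test, overrides the hook; a hypothetical sketch:

    public class OldClusterUpgradeIT extends ESRestTestCase { // hypothetical test class
        @Override
        protected boolean preserveIndicesUponCompletion() {
            return true; // the follow-up test against the upgraded cluster reuses these indices
        }
    }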
private void wipeCluster() throws IOException {
- // wipe indices
- try {
- adminClient().performRequest("DELETE", "*");
- } catch (ResponseException e) {
- // 404 here just means we had no indexes
- if (e.getResponse().getStatusLine().getStatusCode() != 404) {
- throw e;
+ if (preserveIndicesUponCompletion() == false) {
+ // wipe indices
+ try {
+ adminClient().performRequest("DELETE", "*");
+ } catch (ResponseException e) {
+ // 404 here just means we had no indexes
+ if (e.getResponse().getStatusLine().getStatusCode() != 404) {
+ throw e;
+ }
}
}
// wipe index templates
adminClient().performRequest("DELETE", "_template/*");
- // wipe snapshots
- // Technically this deletes all repositories and leave the snapshots in the repository. OK.
- adminClient().performRequest("DELETE", "_snapshot/*");
+ wipeSnapshots();
+ }
+
+    /**
+     * Wipe the fs snapshots we created one by one, then delete all repositories, so that the next test can create its repositories
+     * fresh and they'll start empty. There isn't an API to delete all snapshots. There is an API to delete all snapshot repositories,
+     * but that leaves all of the snapshots intact in the repository.
+     */
+ private void wipeSnapshots() throws IOException {
+ for (Map.Entry<String, ?> repo : entityAsMap(adminClient.performRequest("GET", "_snapshot/_all")).entrySet()) {
+ String repoName = repo.getKey();
+ Map<?, ?> repoSpec = (Map<?, ?>) repo.getValue();
+ String repoType = (String) repoSpec.get("type");
+ if (repoType.equals("fs")) {
+                // for all other repo types we really don't have a chance of iterating the snapshots properly, sadly
+ String url = "_snapshot/" + repoName + "/_all";
+ Map<String, String> params = singletonMap("ignore_unavailable", "true");
+ List<?> snapshots = (List<?>) entityAsMap(adminClient.performRequest("GET", url, params)).get("snapshots");
+ for (Object snapshot : snapshots) {
+ Map<?, ?> snapshotInfo = (Map<?, ?>) snapshot;
+ String name = (String) snapshotInfo.get("snapshot");
+ logger.debug("wiping snapshot [{}/{}]", repoName, name);
+ adminClient().performRequest("DELETE", "_snapshot/" + repoName + "/" + name);
+ }
+ }
+ logger.debug("wiping snapshot repository [{}]", repoName);
+ adminClient().performRequest("DELETE", "_snapshot/" + repoName);
+ }
}
/**
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java
index 41ae7d8c04..8040c421dc 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java
@@ -19,19 +19,18 @@
package org.elasticsearch.test.rest.yaml;
import com.carrotsearch.randomizedtesting.RandomizedTest;
-
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi;
@@ -55,7 +54,7 @@ import java.util.Set;
* REST calls.
*/
public class ClientYamlTestClient {
- private static final ESLogger logger = Loggers.getLogger(ClientYamlTestClient.class);
+ private static final Logger logger = Loggers.getLogger(ClientYamlTestClient.class);
//query_string params that don't need to be declared in the spec, they are supported by default
private static final Set<String> ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path");
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java
index 43feb238cc..2f1e42c12c 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java
@@ -19,9 +19,9 @@
package org.elasticsearch.test.rest.yaml;
import org.apache.http.HttpHost;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec;
@@ -39,7 +39,7 @@ import java.util.Map;
*/
public class ClientYamlTestExecutionContext {
- private static final ESLogger logger = Loggers.getLogger(ClientYamlTestExecutionContext.class);
+ private static final Logger logger = Loggers.getLogger(ClientYamlTestExecutionContext.class);
private final Stash stash = new Stash();
@@ -75,8 +75,10 @@ public class ClientYamlTestExecutionContext {
response = e.getRestTestResponse();
throw e;
} finally {
+            // if we hit a bad exception the response is null
+            Object responseBody = response != null ? response.getBody() : null;
             //we always stash the last response body
-            stash.stashValue("body", response.getBody());
+            stash.stashValue("body", responseBody);
}
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/FileUtils.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/FileUtils.java
index caaa8b2ec8..4519953819 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/FileUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/FileUtils.java
@@ -155,7 +155,7 @@ public final class FileUtils {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (file.toString().endsWith(fileSuffix)) {
- String groupName = file.toAbsolutePath().getParent().getFileName().toString();
+ String groupName = dir.relativize(file.getParent()).toString();
Set<Path> filesSet = files.get(groupName);
if (filesSet == null) {
filesSet = new HashSet<>();
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java
index dff1e59762..d9a4d957a2 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java
@@ -19,8 +19,8 @@
package org.elasticsearch.test.rest.yaml;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -39,7 +39,7 @@ import java.util.regex.Pattern;
public class Stash implements ToXContent {
private static final Pattern EXTENDED_KEY = Pattern.compile("\\$\\{([^}]+)\\}");
- private static final ESLogger logger = Loggers.getLogger(Stash.class);
+ private static final Logger logger = Loggers.getLogger(Stash.class);
public static final Stash EMPTY = new Stash();
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java
index af4a8e4f51..e233e9fab8 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java
@@ -18,9 +18,9 @@
*/
package org.elasticsearch.test.rest.yaml.section;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext;
@@ -66,7 +66,7 @@ import static org.junit.Assert.fail;
*/
public class DoSection implements ExecutableSection {
- private static final ESLogger logger = Loggers.getLogger(DoSection.class);
+ private static final Logger logger = Loggers.getLogger(DoSection.class);
private final XContentLocation location;
private String catchParam;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java
index 1e8f38e7a4..b531f180fd 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.test.rest.yaml.section;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -34,7 +34,7 @@ import static org.junit.Assert.fail;
*/
public class GreaterThanAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(GreaterThanAssertion.class);
+ private static final Logger logger = Loggers.getLogger(GreaterThanAssertion.class);
public GreaterThanAssertion(XContentLocation location, String field, Object expectedValue) {
super(location, field, expectedValue);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java
index 9c9936592c..14b1a08a87 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java
@@ -19,7 +19,7 @@
package org.elasticsearch.test.rest.yaml.section;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -35,7 +35,7 @@ import static org.junit.Assert.fail;
*/
public class GreaterThanEqualToAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(GreaterThanEqualToAssertion.class);
+ private static final Logger logger = Loggers.getLogger(GreaterThanEqualToAssertion.class);
public GreaterThanEqualToAssertion(XContentLocation location, String field, Object expectedValue) {
super(location, field, expectedValue);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java
index f679691c9c..a356182ab4 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.test.rest.yaml.section;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -35,7 +35,7 @@ import static org.junit.Assert.assertThat;
*/
public class IsFalseAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(IsFalseAssertion.class);
+ private static final Logger logger = Loggers.getLogger(IsFalseAssertion.class);
public IsFalseAssertion(XContentLocation location, String field) {
super(location, field, false);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java
index 12bd8a34ed..76ca0de70d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.test.rest.yaml.section;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -36,7 +36,7 @@ import static org.junit.Assert.assertThat;
*/
public class IsTrueAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class);
+ private static final Logger logger = Loggers.getLogger(IsTrueAssertion.class);
public IsTrueAssertion(XContentLocation location, String field) {
super(location, field, true);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java
index 5fd111733d..062b9ecd87 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.test.rest.yaml.section;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -36,7 +36,7 @@ import static org.junit.Assert.assertThat;
*/
public class LengthAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(LengthAssertion.class);
+ private static final Logger logger = Loggers.getLogger(LengthAssertion.class);
public LengthAssertion(XContentLocation location, String field, Object expectedValue) {
super(location, field, expectedValue);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java
index 2e9ab74448..591bd83fa6 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java
@@ -18,7 +18,7 @@
*/
package org.elasticsearch.test.rest.yaml.section;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -35,7 +35,7 @@ import static org.junit.Assert.fail;
*/
public class LessThanAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(LessThanAssertion.class);
+ private static final Logger logger = Loggers.getLogger(LessThanAssertion.class);
public LessThanAssertion(XContentLocation location, String field, Object expectedValue) {
super(location, field, expectedValue);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java
index 46e25332b6..7c5710f689 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java
@@ -19,7 +19,7 @@
package org.elasticsearch.test.rest.yaml.section;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -35,7 +35,7 @@ import static org.junit.Assert.fail;
*/
public class LessThanOrEqualToAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class);
+ private static final Logger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class);
public LessThanOrEqualToAssertion(XContentLocation location, String field, Object expectedValue) {
super(location, field, expectedValue);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java
index c2a52bf735..2bfb94e658 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java
@@ -18,8 +18,8 @@
*/
package org.elasticsearch.test.rest.yaml.section;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentLocation;
@@ -44,7 +44,7 @@ import static org.junit.Assert.assertThat;
*/
public class MatchAssertion extends Assertion {
- private static final ESLogger logger = Loggers.getLogger(MatchAssertion.class);
+ private static final Logger logger = Loggers.getLogger(MatchAssertion.class);
public MatchAssertion(XContentLocation location, String field, Object expectedValue) {
super(location, field, expectedValue);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
index 1b99d2f32c..057e7c4845 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
@@ -21,7 +21,7 @@ package org.elasticsearch.test.store;
import com.carrotsearch.randomizedtesting.SeedUtils;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.Directory;
@@ -34,7 +34,6 @@ import org.apache.lucene.util.TestRuleMarkFailure;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@@ -113,7 +112,7 @@ public class MockFSDirectoryService extends FsDirectoryService {
throw new UnsupportedOperationException();
}
- public static void checkIndex(ESLogger logger, Store store, ShardId shardId) {
+ public static void checkIndex(Logger logger, Store store, ShardId shardId) {
if (store.tryIncRef()) {
logger.info("start check index");
try {
@@ -172,7 +171,6 @@ public class MockFSDirectoryService extends FsDirectoryService {
w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen);
w.setThrottling(throttle);
w.setCheckIndexOnClose(false); // we do this on the index level
- w.setPreventDoubleWrite(preventDoubleWrite);
// TODO: make this test robust to virus scanner
w.setAssertNoDeleteOpenFile(false);
w.setUseSlowOpenClosers(false);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
index 57d9fe3ff0..70c8d2be11 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
@@ -19,13 +19,12 @@
package org.elasticsearch.test.store;
+import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.IndexEventListener;
@@ -98,7 +97,7 @@ public class MockFSIndexStore extends IndexStore {
if (indexShard != null) {
Boolean remove = shardSet.remove(indexShard);
if (remove == Boolean.TRUE) {
- ESLogger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
+ Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId());
}
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
index ec695e8bd4..b07e7315b8 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
@@ -19,6 +19,8 @@
package org.elasticsearch.test.tasks;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -51,7 +53,11 @@ public class MockTaskManager extends TaskManager {
try {
listener.onTaskRegistered(task);
} catch (Exception e) {
- logger.warn("failed to notify task manager listener about unregistering the task with id {}", e, task.getId());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify task manager listener about unregistering the task with id {}",
+ task.getId()),
+ e);
}
}
}
@@ -66,7 +72,9 @@ public class MockTaskManager extends TaskManager {
try {
listener.onTaskUnregistered(task);
} catch (Exception e) {
- logger.warn("failed to notify task manager listener about unregistering the task with id {}", e, task.getId());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify task manager listener about unregistering the task with id {}", task.getId()), e);
}
}
} else {
@@ -81,7 +89,11 @@ public class MockTaskManager extends TaskManager {
try {
listener.waitForTaskCompletion(task);
} catch (Exception e) {
- logger.warn("failed to notify task manager listener about waitForTaskCompletion the task with id {}", e, task.getId());
+ logger.warn(
+ (Supplier<?>) () -> new ParameterizedMessage(
+ "failed to notify task manager listener about waitForTaskCompletion the task with id {}",
+ task.getId()),
+ e);
}
}
super.waitForTaskCompletion(task, untilInNanos);
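The repeated change above is the log4j 2 lazy-message idiom this merge adopts: the Supplier cast selects the Logger#warn(Supplier<?>, Throwable) overload, so the ParameterizedMessage is only built when WARN is actually enabled. A standalone sketch (taskId and exception are placeholder names):

    // deferred construction: the lambda runs only if WARN is enabled
    logger.warn(
        (Supplier<?>) () -> new ParameterizedMessage("failed to notify listener for task {}", taskId),
        exception);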
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
index 2724f53857..b44e180b45 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
@@ -24,15 +24,19 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
@@ -42,22 +46,31 @@ import org.elasticsearch.transport.local.LocalTransport;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.Random;
+import java.util.function.Supplier;
public class AssertingLocalTransport extends LocalTransport {
public static final String ASSERTING_TRANSPORT_NAME = "asserting_local";
- public static class TestPlugin extends Plugin {
- public void onModule(NetworkModule module) {
- module.registerTransport(ASSERTING_TRANSPORT_NAME, AssertingLocalTransport.class);
- }
+ public static class TestPlugin extends Plugin implements NetworkPlugin {
@Override
public List<Setting<?>> getSettings() {
return Arrays.asList(ASSERTING_TRANSPORT_MIN_VERSION_KEY, ASSERTING_TRANSPORT_MAX_VERSION_KEY);
}
+
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap(ASSERTING_TRANSPORT_NAME,
+ () -> new AssertingLocalTransport(settings, circuitBreakerService, threadPool, namedWriteableRegistry));
+ }
}
public static final Setting<Version> ASSERTING_TRANSPORT_MIN_VERSION_KEY =
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
index 9c4612b22b..f9b7e1d3a8 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
@@ -21,6 +21,7 @@ package org.elasticsearch.test.transport;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -29,7 +30,6 @@ import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
@@ -72,21 +72,13 @@ import java.util.concurrent.CopyOnWriteArrayList;
* (for example, @see org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing, which constructs
* fake DiscoveryNode instances where the publish address is one of the bound addresses).
*/
-public class MockTransportService extends TransportService {
+public final class MockTransportService extends TransportService {
public static class TestPlugin extends Plugin {
- public void onModule(NetworkModule module) {
- module.registerTransportService("mock", MockTransportService.class);
- }
-
@Override
public List<Setting<?>> getSettings() {
return Arrays.asList(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING);
}
- @Override
- public Settings additionalSettings() {
- return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "mock").build();
- }
}
public static MockTransportService local(Settings settings, Version version, ThreadPool threadPool) {
@@ -97,14 +89,14 @@ public class MockTransportService extends TransportService {
return version;
}
};
- return new MockTransportService(settings, transport, threadPool);
+ return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
}
private final Transport original;
@Inject
- public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
- super(settings, new LookupTestTransport(transport), threadPool);
+ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor) {
+ super(settings, new LookupTestTransport(transport), threadPool, interceptor);
this.original = transport;
}
@@ -642,6 +634,4 @@ public class MockTransportService extends TransportService {
}
}
}
-
-
}
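With the constructor change, callers now supply a TransportInterceptor explicitly; when no interception is wanted, the factory above already passes the no-op constant (sketch):

    // sketch: build a local mock service without any interception
    MockTransportService service = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
    // equivalent by hand:
    // new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR)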
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
index 33c5fcccad..ba831dde09 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
@@ -19,6 +19,8 @@
package org.elasticsearch.transport;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListenerResponseHandler;
@@ -222,8 +224,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (Exception e) {
assertThat(e.getMessage(), false, equalTo(true));
}
-
- serviceA.removeHandler("sayHello");
}
public void testThreadContext() throws ExecutionException, InterruptedException {
@@ -279,8 +279,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
assertEquals("ping_user", threadPool.getThreadContext().getHeader("test.ping.user"));
assertSame(context, threadPool.getThreadContext().getTransient("my_private_context"));
assertNull("this header is only visible in the handler context", threadPool.getThreadContext().getHeader("some.temp.header"));
-
- serviceA.removeHandler("sayHello");
}
public void testLocalNodeConnection() throws InterruptedException {
@@ -373,8 +371,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (Exception e) {
assertThat(e.getMessage(), false, equalTo(true));
}
-
- serviceA.removeHandler("sayHello");
}
public void testHelloWorldCompressed() {
@@ -424,8 +420,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (Exception e) {
assertThat(e.getMessage(), false, equalTo(true));
}
-
- serviceA.removeHandler("sayHello");
}
public void testErrorMessage() {
@@ -467,8 +461,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (Exception e) {
assertThat(e.getCause().getMessage(), equalTo("runtime_exception: bad message !!!"));
}
-
- serviceA.removeHandler("sayHelloException");
}
public void testDisconnectListener() throws Exception {
@@ -535,7 +527,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
listener.actionGet();
} catch (Exception e) {
- logger.trace("caught exception while sending to node {}", e, nodeA);
+ logger.trace(
+ (Supplier<?>) () -> new ParameterizedMessage("caught exception while sending to node {}", nodeA), e);
}
}
}
@@ -570,7 +563,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (ConnectTransportException e) {
// ok!
} catch (Exception e) {
- logger.error("caught exception while sending to node {}", e, node);
+ logger.error(
+ (Supplier<?>) () -> new ParameterizedMessage("caught exception while sending to node {}", node), e);
sendingErrors.add(e);
}
}
@@ -631,7 +625,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (TransportException ex) {
}
- serviceA.removeHandler("sayHelloTimeoutDelayedResponse");
}
public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception {
@@ -674,8 +667,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (Exception e) {
assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
}
-
- serviceA.removeHandler("sayHelloTimeoutNoResponse");
}
public void testTimeoutSendExceptionWithDelayedResponse() throws Exception {
@@ -781,13 +772,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
for (Runnable runnable : assertions) {
runnable.run();
}
- serviceA.removeHandler("sayHelloTimeoutDelayedResponse");
waitForever.countDown();
doneWaitingForever.await();
assertTrue(inFlight.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS));
}
- @TestLogging(value = "test.transport.tracer:TRACE")
+ @TestLogging(value = "org.elasticsearch.test.transport.tracer:TRACE")
public void testTracerLog() throws InterruptedException {
TransportRequestHandler handler = new TransportRequestHandler<StringMessageRequest>() {
@Override
@@ -1321,8 +1311,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (ConnectTransportException e) {
// all is well
}
-
- serviceA.removeHandler("sayHello");
}
public void testMockUnresponsiveRule() {
@@ -1381,8 +1369,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
} catch (ConnectTransportException e) {
// all is well
}
-
- serviceA.removeHandler("sayHello");
}
@@ -1684,7 +1670,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
@Override
public void handleException(TransportException exp) {
- logger.debug("---> received exception for id {}", exp, id);
+ logger.debug((Supplier<?>) () -> new ParameterizedMessage("---> received exception for id {}", id), exp);
allRequestsDone.countDown();
Throwable unwrap = ExceptionsHelper.unwrap(exp, IOException.class);
assertNotNull(unwrap);
@@ -1717,4 +1703,16 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
serviceC.close();
}
+
+ public void testRegisterHandlerTwice() {
+ serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+ (request, message) -> {throw new AssertionError("boom");});
+ expectThrows(IllegalArgumentException.class, () ->
+ serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+ (request, message) -> {throw new AssertionError("boom");})
+ );
+
+ serviceA.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+ (request, message) -> {throw new AssertionError("boom");});
+ }
}
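
The logging changes in this test follow the Log4j 2 idiom that replaces the old message-then-exception varargs style: the message becomes a lazily evaluated Supplier of a ParameterizedMessage and the Throwable moves to the trailing argument, so the message is only rendered when the level is enabled. A self-contained sketch of the same pattern (a minimal example, not code from this change; the Logger and Supplier overloads are standard log4j-api 2.x):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LazyLoggingSketch {
        private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

        void onSendFailure(Object node, Exception e) {
            // the cast selects the (Supplier<?>, Throwable) overload instead of (Object, Object);
            // the ParameterizedMessage is only built if TRACE is actually enabled
            logger.trace(
                (Supplier<?>) () -> new ParameterizedMessage("caught exception while sending to node {}", node), e);
        }
    }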
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
index da7fcc5341..a0c81cb63d 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
@@ -98,7 +98,7 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
socket.bind(address);
socket.setReuseAddress(TCP_REUSE_ADDRESS.get(settings()));
ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
- if (tcpReceiveBufferSize.bytes() > 0) {
+ if (tcpReceiveBufferSize.getBytes() > 0) {
socket.setReceiveBufferSize(tcpReceiveBufferSize.bytesAsInt());
}
MockChannel serverMockChannel = new MockChannel(socket, name);
@@ -210,11 +210,11 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
private void configureSocket(Socket socket) throws SocketException {
socket.setTcpNoDelay(TCP_NO_DELAY.get(settings));
ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings);
- if (tcpSendBufferSize.bytes() > 0) {
+ if (tcpSendBufferSize.getBytes() > 0) {
socket.setSendBufferSize(tcpSendBufferSize.bytesAsInt());
}
ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
- if (tcpReceiveBufferSize.bytes() > 0) {
+ if (tcpReceiveBufferSize.getBytes() > 0) {
socket.setReceiveBufferSize(tcpReceiveBufferSize.bytesAsInt());
}
socket.setReuseAddress(TCP_REUSE_ADDRESS.get(settings()));
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransportPlugin.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransportPlugin.java
index 3e17cdcb30..d48e4bb7f7 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransportPlugin.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransportPlugin.java
@@ -18,14 +18,30 @@
*/
package org.elasticsearch.transport;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.threadpool.ThreadPool;
-public class MockTcpTransportPlugin extends Plugin {
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+public class MockTcpTransportPlugin extends Plugin implements NetworkPlugin {
public static final String MOCK_TCP_TRANSPORT_NAME = "mock-socket-network";
- public void onModule(NetworkModule module) {
- module.registerTransport(MOCK_TCP_TRANSPORT_NAME, MockTcpTransport.class);
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ return Collections.singletonMap(MOCK_TCP_TRANSPORT_NAME,
+ () -> new MockTcpTransport(settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService));
}
}
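
Instead of registering through onModule(NetworkModule), a plugin now implements NetworkPlugin and returns named transport suppliers from getTransports; the supplier is only invoked if that transport is actually selected. A minimal sketch of another plugin following the same pattern, reusing the exact signature and imports from the hunk above (MyTransport is a hypothetical Transport implementation):

    public class MyTransportPlugin extends Plugin implements NetworkPlugin {
        public static final String MY_TRANSPORT_NAME = "my-transport";

        @Override
        public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                                                              CircuitBreakerService circuitBreakerService,
                                                              NamedWriteableRegistry namedWriteableRegistry,
                                                              NetworkService networkService) {
            // key = the transport name selected via settings; value = a lazy factory
            return Collections.singletonMap(MY_TRANSPORT_NAME,
                () -> new MyTransport(settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService));
        }
    }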
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java
index 2b0f551dbb..a198ef7795 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java
@@ -37,5 +37,4 @@ public class MockTransportClient extends TransportClient {
public MockTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
super(settings, DEFAULT_SETTINGS, plugins);
}
-
}
diff --git a/test/framework/src/main/resources/log4j.properties b/test/framework/src/main/resources/log4j.properties
deleted file mode 100644
index 87d4560f72..0000000000
--- a/test/framework/src/main/resources/log4j.properties
+++ /dev/null
@@ -1,9 +0,0 @@
-tests.es.logger.level=INFO
-log4j.rootLogger=${tests.es.logger.level}, out
-
-log4j.logger.org.apache.http=INFO, out
-log4j.additivity.org.apache.http=false
-
-log4j.appender.out=org.apache.log4j.ConsoleAppender
-log4j.appender.out.layout=org.apache.log4j.PatternLayout
-log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
diff --git a/test/framework/src/main/resources/log4j2-test.properties b/test/framework/src/main/resources/log4j2-test.properties
new file mode 100644
index 0000000000..f573cace79
--- /dev/null
+++ b/test/framework/src/main/resources/log4j2-test.properties
@@ -0,0 +1,10 @@
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+rootLogger.level = ${sys:tests.es.logger.level:-info}
+rootLogger.appenderRef.console.ref = console
+ e
\ No newline at end of file
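
The replacement Log4j 2 config resolves the root level from the tests.es.logger.level system property via the ${sys:...:-info} lookup, falling back to info. A one-line sketch of overriding it from test infrastructure (it would normally arrive on the JVM command line as -Dtests.es.logger.level=debug):

    // must be set before Log4j 2 initializes, otherwise the ":-info" default wins
    System.setProperty("tests.es.logger.level", "debug");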
diff --git a/core/src/main/java/org/apache/log4j/package-info.java b/test/framework/src/test/java/Dummy.java
index f628016aa6..927189478b 100644
--- a/core/src/main/java/org/apache/log4j/package-info.java
+++ b/test/framework/src/test/java/Dummy.java
@@ -17,7 +17,5 @@
* under the License.
*/
-/**
- * Hack to fix Log4j 1.2 in Java 9.
- */
-package org.apache.log4j;
+class Dummy {
+}
diff --git a/test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java
new file mode 100644
index 0000000000..0f4fbc571c
--- /dev/null
+++ b/test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.util.set.Sets;
+import org.hamcrest.Matcher;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.util.Collections.singleton;
+import static org.elasticsearch.test.AbstractQueryTestCase.alterateQueries;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Various test for {@link org.elasticsearch.test.AbstractQueryTestCase}
+ */
+public class AbstractQueryTestCaseTests extends ESTestCase {
+
+ public void testAlterateQueries() throws IOException {
+ List<Tuple<String, Boolean>> alterations = alterateQueries(singleton("{\"field\": \"value\"}"), null);
+ assertAlterations(alterations, allOf(notNullValue(), hasEntry("{\"newField\":{\"field\":\"value\"}}", true)));
+
+ alterations = alterateQueries(singleton("{\"term\":{\"field\": \"value\"}}"), null);
+ assertAlterations(alterations, allOf(
+ hasEntry("{\"newField\":{\"term\":{\"field\":\"value\"}}}", true),
+ hasEntry("{\"term\":{\"newField\":{\"field\":\"value\"}}}", true))
+ );
+
+ alterations = alterateQueries(singleton("{\"bool\":{\"must\": [{\"match\":{\"field\":\"value\"}}]}}"), null);
+ assertAlterations(alterations, allOf(
+ hasEntry("{\"newField\":{\"bool\":{\"must\":[{\"match\":{\"field\":\"value\"}}]}}}", true),
+ hasEntry("{\"bool\":{\"newField\":{\"must\":[{\"match\":{\"field\":\"value\"}}]}}}", true),
+ hasEntry("{\"bool\":{\"must\":[{\"newField\":{\"match\":{\"field\":\"value\"}}}]}}", true),
+ hasEntry("{\"bool\":{\"must\":[{\"match\":{\"newField\":{\"field\":\"value\"}}}]}}", true)
+ ));
+
+ alterations = alterateQueries(singleton("{\"function_score\":" +
+ "{\"query\": {\"term\":{\"foo\": \"bar\"}}, \"script_score\": {\"script\":\"a + 1\", \"params\": {\"a\":0}}}}"), null);
+ assertAlterations(alterations, allOf(
+ hasEntry("{\"newField\":{\"function_score\":{\"query\":{\"term\":{\"foo\":\"bar\"}},\"script_score\":{\"script\":\"a + " +
+ "1\",\"params\":{\"a\":0}}}}}", true),
+ hasEntry("{\"function_score\":{\"newField\":{\"query\":{\"term\":{\"foo\":\"bar\"}},\"script_score\":{\"script\":\"a + " +
+ "1\",\"params\":{\"a\":0}}}}}", true),
+ hasEntry("{\"function_score\":{\"query\":{\"newField\":{\"term\":{\"foo\":\"bar\"}}},\"script_score\":{\"script\":\"a + " +
+ "1\",\"params\":{\"a\":0}}}}", true),
+ hasEntry("{\"function_score\":{\"query\":{\"term\":{\"newField\":{\"foo\":\"bar\"}}},\"script_score\":{\"script\":\"a + " +
+ "1\",\"params\":{\"a\":0}}}}", true),
+ hasEntry("{\"function_score\":{\"query\":{\"term\":{\"foo\":\"bar\"}},\"script_score\":{\"newField\":{\"script\":\"a + " +
+ "1\",\"params\":{\"a\":0}}}}}", true),
+ hasEntry("{\"function_score\":{\"query\":{\"term\":{\"foo\":\"bar\"}},\"script_score\":{\"script\":\"a + 1\"," +
+ "\"params\":{\"newField\":{\"a\":0}}}}}", true)
+ ));
+ }
+
+ public void testAlterateQueriesWithArbitraryContent() throws IOException {
+ Set<String> arbitraryContentHolders = Sets.newHashSet("params", "doc");
+ Set<String> queries = Sets.newHashSet(
+ "{\"query\":{\"script\":\"test\",\"params\":{\"foo\":\"bar\"}}}",
+ "{\"query\":{\"more_like_this\":{\"fields\":[\"a\",\"b\"],\"like\":{\"doc\":{\"c\":\"d\"}}}}}"
+ );
+
+ List<Tuple<String, Boolean>> alterations = alterateQueries(queries, arbitraryContentHolders);
+ assertAlterations(alterations, allOf(
+ hasEntry("{\"newField\":{\"query\":{\"script\":\"test\",\"params\":{\"foo\":\"bar\"}}}}", true),
+ hasEntry("{\"query\":{\"newField\":{\"script\":\"test\",\"params\":{\"foo\":\"bar\"}}}}", true),
+ hasEntry("{\"query\":{\"script\":\"test\",\"params\":{\"newField\":{\"foo\":\"bar\"}}}}", false)
+ ));
+ assertAlterations(alterations, allOf(
+ hasEntry("{\"newField\":{\"query\":{\"more_like_this\":{\"fields\":[\"a\",\"b\"],\"like\":{\"doc\":{\"c\":\"d\"}}}}}}", true),
+ hasEntry("{\"query\":{\"newField\":{\"more_like_this\":{\"fields\":[\"a\",\"b\"],\"like\":{\"doc\":{\"c\":\"d\"}}}}}}", true),
+ hasEntry("{\"query\":{\"more_like_this\":{\"newField\":{\"fields\":[\"a\",\"b\"],\"like\":{\"doc\":{\"c\":\"d\"}}}}}}", true),
+ hasEntry("{\"query\":{\"more_like_this\":{\"fields\":[\"a\",\"b\"],\"like\":{\"newField\":{\"doc\":{\"c\":\"d\"}}}}}}", true),
+ hasEntry("{\"query\":{\"more_like_this\":{\"fields\":[\"a\",\"b\"],\"like\":{\"doc\":{\"newField\":{\"c\":\"d\"}}}}}}", false)
+ ));
+ }
+
+ private static <K, V> void assertAlterations(List<Tuple<K, V>> alterations, Matcher<Map<K, V>> matcher) {
+ assertThat(alterations.stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)), matcher);
+ }
+}
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/FileUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/FileUtilsTests.java
index 4387bf164f..457152381b 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/FileUtilsTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/FileUtilsTests.java
@@ -31,6 +31,7 @@ import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.Matchers.greaterThan;
public class FileUtilsTests extends ESTestCase {
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/20240")
public void testLoadSingleYamlSuite() throws Exception {
Map<String,Set<Path>> yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "/rest-api-spec/test/suite1/10_basic");
assertSingleFile(yamlSuites, "suite1", "10_basic.yaml");
@@ -44,6 +45,7 @@ public class FileUtilsTests extends ESTestCase {
assertSingleFile(yamlSuites, "suite1", "10_basic.yaml");
}
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/20240")
public void testLoadMultipleYamlSuites() throws Exception {
//single directory
Map<String,Set<Path>> yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "suite1");
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java
index 5559fd3f38..1f97a6ed13 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java
@@ -22,8 +22,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.rest.yaml.ObjectPath;
-import org.elasticsearch.test.rest.yaml.Stash;
import java.io.IOException;
import java.util.HashMap;
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
index bb0722365c..f5f1cb77a7 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
@@ -19,7 +19,9 @@
package org.elasticsearch.test.test;
-import org.elasticsearch.common.logging.ESLogger;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
@@ -30,37 +32,47 @@ import org.junit.runner.Result;
import java.lang.reflect.Method;
import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.nullValue;
public class LoggingListenerTests extends ESTestCase {
+
+ public void testTestRunStartedSupportsClassInDefaultPackage() throws Exception {
+ LoggingListener loggingListener = new LoggingListener();
+ Description description = Description.createTestDescription(Class.forName("Dummy"), "dummy");
+
+ // Will throw an exception without the check for testClassPackage != null in testRunStarted
+ loggingListener.testRunStarted(description);
+ }
+
public void testCustomLevelPerMethod() throws Exception {
LoggingListener loggingListener = new LoggingListener();
Description suiteDescription = Description.createSuiteDescription(TestClass.class);
- ESLogger abcLogger = Loggers.getLogger("abc");
- ESLogger xyzLogger = Loggers.getLogger("xyz");
+ Logger xyzLogger = Loggers.getLogger("xyz");
+ Logger abcLogger = Loggers.getLogger("abc");
+
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
- assertThat(abcLogger.getLevel(), nullValue());
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testRunStarted(suiteDescription);
- assertThat(xyzLogger.getLevel(), nullValue());
- assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
Method method = TestClass.class.getMethod("annotatedTestMethod");
TestLogging annotation = method.getAnnotation(TestLogging.class);
Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation);
loggingListener.testStarted(testDescription);
- assertThat(xyzLogger.getLevel(), equalTo("TRACE"));
- assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testFinished(testDescription);
- assertThat(xyzLogger.getLevel(), nullValue());
- assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testRunFinished(new Result());
- assertThat(xyzLogger.getLevel(), nullValue());
- assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
}
public void testCustomLevelPerClass() throws Exception {
@@ -68,27 +80,29 @@ public class LoggingListenerTests extends ESTestCase {
Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class);
- ESLogger abcLogger = Loggers.getLogger("abc");
- ESLogger xyzLogger = Loggers.getLogger("xyz");
+ Logger abcLogger = Loggers.getLogger("abc");
+ Logger xyzLogger = Loggers.getLogger("xyz");
- assertThat(xyzLogger.getLevel(), nullValue());
- assertThat(abcLogger.getLevel(), nullValue());
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
+
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testRunStarted(suiteDescription);
- assertThat(abcLogger.getLevel(), equalTo("ERROR"));
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test");
loggingListener.testStarted(testDescription);
- assertThat(abcLogger.getLevel(), equalTo("ERROR"));
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testFinished(testDescription);
- assertThat(abcLogger.getLevel(), equalTo("ERROR"));
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testRunFinished(new Result());
- assertThat(abcLogger.getLevel(), nullValue());
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
}
public void testCustomLevelPerClassAndPerMethod() throws Exception {
@@ -96,49 +110,56 @@ public class LoggingListenerTests extends ESTestCase {
Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class);
- ESLogger abcLogger = Loggers.getLogger("abc");
- ESLogger xyzLogger = Loggers.getLogger("xyz");
+ Logger abcLogger = Loggers.getLogger("abc");
+ Logger xyzLogger = Loggers.getLogger("xyz");
+
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
- assertThat(xyzLogger.getLevel(), nullValue());
- assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testRunStarted(suiteDescription);
- assertThat(abcLogger.getLevel(), equalTo("ERROR"));
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
Method method = TestClass.class.getMethod("annotatedTestMethod");
TestLogging annotation = method.getAnnotation(TestLogging.class);
Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation);
loggingListener.testStarted(testDescription);
- assertThat(abcLogger.getLevel(), equalTo("ERROR"));
- assertThat(xyzLogger.getLevel(), equalTo("TRACE"));
+ assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE));
+ assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testFinished(testDescription);
- assertThat(abcLogger.getLevel(), equalTo("ERROR"));
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
Method method2 = TestClass.class.getMethod("annotatedTestMethod2");
TestLogging annotation2 = method2.getAnnotation(TestLogging.class);
Description testDescription2 = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod2", annotation2);
loggingListener.testStarted(testDescription2);
- assertThat(abcLogger.getLevel(), equalTo("TRACE"));
- assertThat(xyzLogger.getLevel(), equalTo("DEBUG"));
+ assertThat(xyzLogger.getLevel(), equalTo(Level.DEBUG));
+ assertThat(abcLogger.getLevel(), equalTo(Level.TRACE));
loggingListener.testFinished(testDescription2);
- assertThat(abcLogger.getLevel(), equalTo("ERROR"));
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testRunFinished(new Result());
- assertThat(abcLogger.getLevel(), nullValue());
- assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
}
- @TestLogging("abc:ERROR")
+ /**
+ * dummy class used to create a junit suite description that has the @TestLogging annotation
+ */
+ @TestLogging("abc:WARN")
public static class AnnotatedTestClass {
- //dummy class used to create a junit suite description that has the @TestLogging annotation
+
}
+ /**
+ * dummy class used to create a junit suite description that doesn't have the @TestLogging annotation, but its test methods have it
+ */
public static class TestClass {
- //dummy class used to create a junit suite description that doesn't have the @TestLogging annotation, but its test methods have it
@SuppressWarnings("unused")
@TestLogging("xyz:TRACE")
@@ -147,5 +168,7 @@ public class LoggingListenerTests extends ESTestCase {
@SuppressWarnings("unused")
@TestLogging("abc:TRACE,xyz:DEBUG")
public void annotatedTestMethod2() {}
+
}
+
}
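
These tests pin down the LoggingListener contract under Log4j 2: a class-level @TestLogging annotation holds for the whole suite, a method-level annotation overrides it for that test only, and every logger reverts to the root logger's level once the run finishes (previously the expectation was a null per-logger level). A minimal sketch of the annotation as test authors use it (logger names are illustrative):

    import org.elasticsearch.test.ESTestCase;
    import org.elasticsearch.test.junit.annotations.TestLogging;

    @TestLogging("org.elasticsearch.transport:DEBUG")            // applies to every test in the class
    public class MyTransportLoggingTests extends ESTestCase {

        @TestLogging("org.elasticsearch.transport.tracer:TRACE") // tightened for this method only
        public void testWithTracing() {
            // levels revert to the class annotation, then to the root level, as the listener unwinds
        }
    }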
diff --git a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java
index 4ee7b0c7b2..e6a563b3e8 100644
--- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java
@@ -34,7 +34,8 @@ public class MockTcpTransportTests extends AbstractSimpleTransportTestCase {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(settings, Collections.emptyList()), version);
- MockTransportService mockTransportService = new MockTransportService(Settings.EMPTY, transport, threadPool);
+ MockTransportService mockTransportService = new MockTransportService(Settings.EMPTY, transport, threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR);
mockTransportService.start();
return mockTransportService;
}
diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle
index 1a5815cf76..98fe76bfcd 100644
--- a/test/logger-usage/build.gradle
+++ b/test/logger-usage/build.gradle
@@ -21,6 +21,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
dependencies {
compile 'org.ow2.asm:asm-debug-all:5.0.4' // use asm-debug-all as asm-all is broken
+ compile "org.apache.logging.log4j:log4j-api:${versions.log4j}"
testCompile "org.elasticsearch.test:framework:${version}"
}
@@ -30,4 +31,17 @@ forbiddenApisMain.enabled = true // disabled by parent project
forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] // does not depend on core, only jdk signatures
}
-jarHell.enabled = true // disabled by parent project
\ No newline at end of file
+jarHell.enabled = true // disabled by parent project
+
+thirdPartyAudit.excludes = [
+ // log4j
+ 'org.osgi.framework.AdaptPermission',
+ 'org.osgi.framework.AdminPermission',
+ 'org.osgi.framework.Bundle',
+ 'org.osgi.framework.BundleActivator',
+ 'org.osgi.framework.BundleContext',
+ 'org.osgi.framework.BundleEvent',
+ 'org.osgi.framework.SynchronousBundleListener',
+ 'org.osgi.framework.wiring.BundleWire',
+ 'org.osgi.framework.wiring.BundleWiring'
+]
diff --git a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java
index 041d21cc76..928cc0a1fb 100644
--- a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java
+++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java
@@ -19,6 +19,10 @@
package org.elasticsearch.test.loggerusage;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.Marker;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
import org.objectweb.asm.AnnotationVisitor;
import org.objectweb.asm.ClassReader;
import org.objectweb.asm.ClassVisitor;
@@ -52,9 +56,16 @@ import java.util.function.Consumer;
import java.util.function.Predicate;
public class ESLoggerUsageChecker {
- public static final String LOGGER_CLASS = "org.elasticsearch.common.logging.ESLogger";
- public static final String THROWABLE_CLASS = "java.lang.Throwable";
- public static final List<String> LOGGER_METHODS = Arrays.asList("trace", "debug", "info", "warn", "error");
+ public static final Type LOGGER_CLASS = Type.getType(Logger.class);
+ public static final Type THROWABLE_CLASS = Type.getType(Throwable.class);
+ public static final Type STRING_CLASS = Type.getType(String.class);
+ public static final Type STRING_ARRAY_CLASS = Type.getType(String[].class);
+ public static final Type PARAMETERIZED_MESSAGE_CLASS = Type.getType(ParameterizedMessage.class);
+ public static final Type OBJECT_CLASS = Type.getType(Object.class);
+ public static final Type OBJECT_ARRAY_CLASS = Type.getType(Object[].class);
+ public static final Type SUPPLIER_ARRAY_CLASS = Type.getType(Supplier[].class);
+ public static final Type MARKER_CLASS = Type.getType(Marker.class);
+ public static final List<String> LOGGER_METHODS = Arrays.asList("trace", "debug", "info", "warn", "error", "fatal");
public static final String IGNORE_CHECKS_ANNOTATION = "org.elasticsearch.common.SuppressLoggerChecks";
@SuppressForbidden(reason = "command line tool")
@@ -143,7 +154,7 @@ public class ESLoggerUsageChecker {
simpleClassName = simpleClassName + ".java";
StringBuilder sb = new StringBuilder();
sb.append("Bad usage of ");
- sb.append(LOGGER_CLASS).append("#").append(logMethodName);
+ sb.append(LOGGER_CLASS.getClassName()).append("#").append(logMethodName);
sb.append(": ");
sb.append(errorMessage);
sb.append("\n\tat ");
@@ -230,7 +241,7 @@ public class ESLoggerUsageChecker {
} catch (AnalyzerException e) {
throw new RuntimeException("Internal error: failed in analysis step", e);
}
- Frame<BasicValue>[] stringFrames = stringPlaceHolderAnalyzer.getFrames();
+ Frame<BasicValue>[] logMessageFrames = stringPlaceHolderAnalyzer.getFrames();
Frame<BasicValue>[] arraySizeFrames = arraySizeAnalyzer.getFrames();
AbstractInsnNode[] insns = methodNode.instructions.toArray();
int lineNumber = -1;
@@ -240,53 +251,141 @@ public class ESLoggerUsageChecker {
LineNumberNode lineNumberNode = (LineNumberNode) insn;
lineNumber = lineNumberNode.line;
}
- if (insn.getOpcode() == Opcodes.INVOKEVIRTUAL) {
+ if (insn.getOpcode() == Opcodes.INVOKEINTERFACE) {
MethodInsnNode methodInsn = (MethodInsnNode) insn;
- if (Type.getObjectType(methodInsn.owner).getClassName().equals(LOGGER_CLASS) == false) {
- continue;
- }
- if (LOGGER_METHODS.contains(methodInsn.name) == false) {
- continue;
- }
- BasicValue varArgsSizeObject = getStackValue(arraySizeFrames[i], 0); // last argument
- if (varArgsSizeObject instanceof ArraySizeBasicValue == false) {
- wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
- "Could not determine size of varargs array"));
- continue;
- }
- ArraySizeBasicValue varArgsSize = (ArraySizeBasicValue) varArgsSizeObject;
- Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc);
- BasicValue logMessageLengthObject = getStackValue(stringFrames[i], argumentTypes.length - 1); // first argument
- if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) {
- if (varArgsSize.minValue > 0) {
- wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
- "First argument must be a string constant so that we can statically ensure proper place holder usage"));
+ if (Type.getObjectType(methodInsn.owner).equals(LOGGER_CLASS)) {
+ if (LOGGER_METHODS.contains(methodInsn.name) == false) {
continue;
+ }
+
+ Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc);
+ int markerOffset = 0;
+ if (argumentTypes[0].equals(MARKER_CLASS)) {
+ markerOffset = 1;
+ }
+
+ int lengthWithoutMarker = argumentTypes.length - markerOffset;
+
+ if (lengthWithoutMarker == 2 &&
+ argumentTypes[markerOffset + 0].equals(STRING_CLASS) &&
+ (argumentTypes[markerOffset + 1].equals(OBJECT_ARRAY_CLASS) ||
+ argumentTypes[markerOffset + 1].equals(SUPPLIER_ARRAY_CLASS))) {
+ // VARARGS METHOD: debug(Marker?, String, (Object...|Supplier...))
+ checkArrayArgs(methodNode, logMessageFrames[i], arraySizeFrames[i], lineNumber, methodInsn, markerOffset + 0,
+ markerOffset + 1);
+ } else if (lengthWithoutMarker >= 2 &&
+ argumentTypes[markerOffset + 0].equals(STRING_CLASS) &&
+ argumentTypes[markerOffset + 1].equals(OBJECT_CLASS)) {
+ // MULTI-PARAM METHOD: debug(Marker?, String, Object p0, ...)
+ checkFixedArityArgs(methodNode, logMessageFrames[i], lineNumber, methodInsn, markerOffset + 0,
+ lengthWithoutMarker - 1);
+ } else if ((lengthWithoutMarker == 1 || lengthWithoutMarker == 2) &&
+ (lengthWithoutMarker == 2 ? argumentTypes[markerOffset + 1].equals(THROWABLE_CLASS) : true)) {
+ // all the rest: debug(Marker?, (Message|MessageSupplier|CharSequence|Object|String|Supplier), Throwable?)
+ checkFixedArityArgs(methodNode, logMessageFrames[i], lineNumber, methodInsn, markerOffset + 0, 0);
} else {
- // don't check logger usage for logger.warn(someObject) as someObject will be fully logged
- continue;
+ throw new IllegalStateException("Method invoked on " + LOGGER_CLASS.getClassName() +
+ " that is not supported by logger usage checker");
}
}
- PlaceHolderStringBasicValue logMessageLength = (PlaceHolderStringBasicValue) logMessageLengthObject;
- if (logMessageLength.minValue != logMessageLength.maxValue) {
- wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
- "Multiple log messages with conflicting number of place holders"));
- continue;
- }
- if (varArgsSize.minValue != varArgsSize.maxValue) {
- wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
- "Multiple parameter arrays with conflicting sizes"));
- continue;
- }
- assert logMessageLength.minValue == logMessageLength.maxValue && varArgsSize.minValue == varArgsSize.maxValue;
- if (logMessageLength.minValue != varArgsSize.minValue) {
- wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
- "Expected " + logMessageLength.minValue + " arguments but got " + varArgsSize.minValue));
- continue;
+ } else if (insn.getOpcode() == Opcodes.INVOKESPECIAL) { // constructor invocation
+ MethodInsnNode methodInsn = (MethodInsnNode) insn;
+ if (Type.getObjectType(methodInsn.owner).equals(PARAMETERIZED_MESSAGE_CLASS)) {
+ Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc);
+ if (argumentTypes.length == 2 &&
+ argumentTypes[0].equals(STRING_CLASS) &&
+ argumentTypes[1].equals(OBJECT_ARRAY_CLASS)) {
+ checkArrayArgs(methodNode, logMessageFrames[i], arraySizeFrames[i], lineNumber, methodInsn, 0, 1);
+ } else if (argumentTypes.length == 2 &&
+ argumentTypes[0].equals(STRING_CLASS) &&
+ argumentTypes[1].equals(OBJECT_CLASS)) {
+ checkFixedArityArgs(methodNode, logMessageFrames[i], lineNumber, methodInsn, 0, 1);
+ } else if (argumentTypes.length == 3 &&
+ argumentTypes[0].equals(STRING_CLASS) &&
+ argumentTypes[1].equals(OBJECT_CLASS) &&
+ argumentTypes[2].equals(OBJECT_CLASS)) {
+ checkFixedArityArgs(methodNode, logMessageFrames[i], lineNumber, methodInsn, 0, 2);
+ } else if (argumentTypes.length == 3 &&
+ argumentTypes[0].equals(STRING_CLASS) &&
+ argumentTypes[1].equals(OBJECT_ARRAY_CLASS) &&
+ argumentTypes[2].equals(THROWABLE_CLASS)) {
+ checkArrayArgs(methodNode, logMessageFrames[i], arraySizeFrames[i], lineNumber, methodInsn, 0, 1);
+ } else if (argumentTypes.length == 3 &&
+ argumentTypes[0].equals(STRING_CLASS) &&
+ argumentTypes[1].equals(STRING_ARRAY_CLASS) &&
+ argumentTypes[2].equals(THROWABLE_CLASS)) {
+ checkArrayArgs(methodNode, logMessageFrames[i], arraySizeFrames[i], lineNumber, methodInsn, 0, 1);
+ } else {
+ throw new IllegalStateException("Constructor invoked on " + PARAMETERIZED_MESSAGE_CLASS.getClassName() +
+ " that is not supported by logger usage checker");
+ }
}
}
}
}
+
+ private void checkFixedArityArgs(MethodNode methodNode, Frame<BasicValue> logMessageFrame, int lineNumber,
+ MethodInsnNode methodInsn, int messageIndex, int positionalArgsLength) {
+ PlaceHolderStringBasicValue logMessageLength = checkLogMessageConsistency(methodNode, logMessageFrame, lineNumber, methodInsn,
+ messageIndex, positionalArgsLength);
+ if (logMessageLength == null) {
+ return;
+ }
+ if (logMessageLength.minValue != positionalArgsLength) {
+ wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+ "Expected " + logMessageLength.minValue + " arguments but got " + positionalArgsLength));
+ return;
+ }
+ }
+
+ private void checkArrayArgs(MethodNode methodNode, Frame<BasicValue> logMessageFrame, Frame<BasicValue> arraySizeFrame,
+ int lineNumber, MethodInsnNode methodInsn, int messageIndex, int arrayIndex) {
+ BasicValue arraySizeObject = getStackValue(arraySizeFrame, methodInsn, arrayIndex);
+ if (arraySizeObject instanceof ArraySizeBasicValue == false) {
+ wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+ "Could not determine size of array"));
+ return;
+ }
+ ArraySizeBasicValue arraySize = (ArraySizeBasicValue) arraySizeObject;
+ PlaceHolderStringBasicValue logMessageLength = checkLogMessageConsistency(methodNode, logMessageFrame, lineNumber, methodInsn,
+ messageIndex, arraySize.minValue);
+ if (logMessageLength == null) {
+ return;
+ }
+ if (arraySize.minValue != arraySize.maxValue) {
+ wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+ "Multiple parameter arrays with conflicting sizes"));
+ return;
+ }
+ assert logMessageLength.minValue == logMessageLength.maxValue && arraySize.minValue == arraySize.maxValue;
+ if (logMessageLength.minValue != arraySize.minValue) {
+ wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+ "Expected " + logMessageLength.minValue + " arguments but got " + arraySize.minValue));
+ return;
+ }
+ }
+
+ private PlaceHolderStringBasicValue checkLogMessageConsistency(MethodNode methodNode, Frame<BasicValue> logMessageFrame,
+ int lineNumber, MethodInsnNode methodInsn, int messageIndex,
+ int argsSize) {
+ BasicValue logMessageLengthObject = getStackValue(logMessageFrame, methodInsn, messageIndex);
+ if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) {
+ if (argsSize > 0) {
+ wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+ "First argument must be a string constant so that we can statically ensure proper place holder usage"));
+ } else {
+ // don't check logger usage for logger.warn(someObject)
+ }
+ return null;
+ }
+ PlaceHolderStringBasicValue logMessageLength = (PlaceHolderStringBasicValue) logMessageLengthObject;
+ if (logMessageLength.minValue != logMessageLength.maxValue) {
+ wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
+ "Multiple log messages with conflicting number of place holders"));
+ return null;
+ }
+ return logMessageLength;
+ }
}
private static int calculateNumberOfPlaceHolders(String message) {
@@ -300,9 +399,10 @@ public class ESLoggerUsageChecker {
return count;
}
- private static BasicValue getStackValue(Frame<BasicValue> f, int index) {
+ private static BasicValue getStackValue(Frame<BasicValue> f, MethodInsnNode methodInsn, int index) {
+ int relIndex = Type.getArgumentTypes(methodInsn.desc).length - 1 - index;
int top = f.getStackSize() - 1;
- return index <= top ? f.getStack(top - index) : null;
+ return relIndex <= top ? f.getStack(top - relIndex) : null;
}
private static class IntMinMaxTrackingBasicValue extends BasicValue {
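
Each of the checks above ultimately compares two integers: the number of "{}" place holders in the constant log message and the number of arguments actually supplied. A sketch of the counting step consistent with the calculateNumberOfPlaceHolders method named in the surrounding hunk (the exact loop in the source may differ in detail):

    private static int calculateNumberOfPlaceHolders(String message) {
        int count = 0;
        for (int i = 1; i < message.length(); i++) {
            // count non-overlapping "{}" pairs
            if (message.charAt(i - 1) == '{' && message.charAt(i) == '}') {
                count++;
                i++; // step past the '}' so "{}{}" counts as two
            }
        }
        return count;
    }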
diff --git a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java
index 73449f4351..ea60b0cf5b 100644
--- a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java
+++ b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java
@@ -19,21 +19,27 @@
package org.elasticsearch.test.loggerusage;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.Marker;
+import org.apache.logging.log4j.message.Message;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.MessageSupplier;
+import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.SuppressLoggerChecks;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage;
import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage;
+import org.hamcrest.Matchers;
import java.io.IOException;
import java.io.InputStream;
+import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
-import java.util.function.Predicate;
+import java.util.stream.Stream;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasItem;
-import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class ESLoggerUsageTests extends ESTestCase {
@@ -44,11 +50,12 @@ public class ESLoggerUsageTests extends ESTestCase {
logger.info("Checking logger usage for method {}", method.getName());
InputStream classInputStream = getClass().getResourceAsStream(getClass().getSimpleName() + ".class");
List<WrongLoggerUsage> errors = new ArrayList<>();
- ESLoggerUsageChecker.check(errors::add, classInputStream, Predicate.isEqual(method.getName()));
+ ESLoggerUsageChecker.check(errors::add, classInputStream,
+ m -> m.equals(method.getName()) || m.startsWith("lambda$" + method.getName()));
if (method.getName().startsWith("checkFail")) {
- assertFalse("Expected " + method.getName() + " to have wrong ESLogger usage", errors.isEmpty());
+ assertFalse("Expected " + method.getName() + " to have wrong Logger usage", errors.isEmpty());
} else {
- assertTrue("Method " + method.getName() + " has unexpected ESLogger usage errors: " + errors, errors.isEmpty());
+ assertTrue("Method " + method.getName() + " has unexpected Logger usage errors: " + errors, errors.isEmpty());
}
} else {
assertTrue("only allow methods starting with test or check in this class", method.getName().startsWith("test"));
@@ -57,26 +64,57 @@ public class ESLoggerUsageTests extends ESTestCase {
}
}
- public void testLoggerUsageCheckerCompatibilityWithESLogger() throws NoSuchMethodException {
- assertThat(ESLoggerUsageChecker.LOGGER_CLASS, equalTo(ESLogger.class.getName()));
- assertThat(ESLoggerUsageChecker.THROWABLE_CLASS, equalTo(Throwable.class.getName()));
- int varargsMethodCount = 0;
- for (Method method : ESLogger.class.getMethods()) {
- if (method.isVarArgs()) {
- // check that logger usage checks all varargs methods
- assertThat(ESLoggerUsageChecker.LOGGER_METHODS, hasItem(method.getName()));
- varargsMethodCount++;
+ public void testLoggerUsageCheckerCompatibilityWithLog4j2Logger() throws NoSuchMethodException {
+ for (Method method : Logger.class.getMethods()) {
+ if (ESLoggerUsageChecker.LOGGER_METHODS.contains(method.getName())) {
+ assertThat(method.getParameterTypes().length, greaterThanOrEqualTo(1));
+ int markerOffset = method.getParameterTypes()[0].equals(Marker.class) ? 1 : 0;
+ int paramLength = method.getParameterTypes().length - markerOffset;
+ if (method.isVarArgs()) {
+ assertEquals(2, paramLength);
+ assertEquals(String.class, method.getParameterTypes()[markerOffset]);
+ assertThat(method.getParameterTypes()[markerOffset + 1], Matchers.<Class<?>>isOneOf(Object[].class, Supplier[].class));
+ } else {
+ assertThat(method.getParameterTypes()[markerOffset], Matchers.<Class<?>>isOneOf(Message.class, MessageSupplier.class,
+ CharSequence.class, Object.class, String.class, Supplier.class));
+
+ if (paramLength == 2) {
+ assertThat(method.getParameterTypes()[markerOffset + 1], Matchers.<Class<?>>isOneOf(Throwable.class, Object.class));
+ if (method.getParameterTypes()[markerOffset + 1].equals(Object.class)) {
+ assertEquals(String.class, method.getParameterTypes()[markerOffset]);
+ }
+ }
+ if (paramLength > 2) {
+ assertEquals(String.class, method.getParameterTypes()[markerOffset]);
+ assertThat(paramLength, lessThanOrEqualTo(11));
+ for (int i = 1; i < paramLength; i++) {
+ assertEquals(Object.class, method.getParameterTypes()[markerOffset + i]);
+ }
+ }
+ }
}
}
- // currently we have two overloaded methods for each of debug, info, ...
- // if that changes, we might want to have another look at the usage checker
- assertThat(varargsMethodCount, equalTo(ESLoggerUsageChecker.LOGGER_METHODS.size() * 2));
- // check that signature is same as we expect in the usage checker
for (String methodName : ESLoggerUsageChecker.LOGGER_METHODS) {
- assertThat(ESLogger.class.getMethod(methodName, String.class, Object[].class), notNullValue());
- assertThat(ESLogger.class.getMethod(methodName, String.class, Throwable.class, Object[].class), notNullValue());
+ assertEquals(48, Stream.of(Logger.class.getMethods()).filter(m -> methodName.equals(m.getName())).count());
}
+
+ for (Constructor<?> constructor : ParameterizedMessage.class.getConstructors()) {
+ assertThat(constructor.getParameterTypes().length, greaterThanOrEqualTo(2));
+ assertEquals(String.class, constructor.getParameterTypes()[0]);
+ assertThat(constructor.getParameterTypes()[1], Matchers.<Class<?>>isOneOf(String[].class, Object[].class, Object.class));
+
+ if (constructor.getParameterTypes().length > 2) {
+ assertEquals(3, constructor.getParameterTypes().length);
+ if (constructor.getParameterTypes()[1].equals(Object.class)) {
+ assertEquals(Object.class, constructor.getParameterTypes()[2]);
+ } else {
+ assertEquals(Throwable.class, constructor.getParameterTypes()[2]);
+ }
+ }
+ }
+
+ assertEquals(5, ParameterizedMessage.class.getConstructors().length);
}
public void checkNumberOfArguments1() {
@@ -101,7 +139,6 @@ public class ESLoggerUsageTests extends ESTestCase {
}
public void checkNumberOfArguments3() {
- // long argument list (> 5), emits different bytecode
logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, new String("last arg"));
}
@@ -109,12 +146,36 @@ public class ESLoggerUsageTests extends ESTestCase {
logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, 7, new String("last arg"));
}
+ public void checkNumberOfArgumentsParameterizedMessage1() {
+ logger.info(new ParameterizedMessage("Hello {}, {}, {}", "world", 2, "third argument"));
+ }
+
+ public void checkFailNumberOfArgumentsParameterizedMessage1() {
+ logger.info(new ParameterizedMessage("Hello {}, {}", "world", 2, "third argument"));
+ }
+
+ public void checkNumberOfArgumentsParameterizedMessage2() {
+ logger.info(new ParameterizedMessage("Hello {}, {}", "world", 2));
+ }
+
+ public void checkFailNumberOfArgumentsParameterizedMessage2() {
+ logger.info(new ParameterizedMessage("Hello {}, {}, {}", "world", 2));
+ }
+
+ public void checkNumberOfArgumentsParameterizedMessage3() {
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}, {}, {}", "world", 2, "third argument"));
+ }
+
+ public void checkFailNumberOfArgumentsParameterizedMessage3() {
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}, {}", "world", 2, "third argument"));
+ }
+
public void checkOrderOfExceptionArgument() {
logger.info("Hello", new Exception());
}
public void checkOrderOfExceptionArgument1() {
- logger.info("Hello {}", new Exception(), "world");
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}", "world"), new Exception());
}
public void checkFailOrderOfExceptionArgument1() {
@@ -122,7 +183,7 @@ public class ESLoggerUsageTests extends ESTestCase {
}
public void checkOrderOfExceptionArgument2() {
- logger.info("Hello {}, {}", new Exception(), "world", 42);
+ logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}, {}", "world", 42), new Exception());
}
public void checkFailOrderOfExceptionArgument2() {
@@ -134,7 +195,7 @@ public class ESLoggerUsageTests extends ESTestCase {
}
public void checkFailNonConstantMessageWithArguments(boolean b) {
- logger.info(Boolean.toString(b), new Exception(), 42);
+ logger.info((Supplier<?>) () -> new ParameterizedMessage(Boolean.toString(b), 42), new Exception());
}
public void checkComplexUsage(boolean b) {
@@ -166,4 +227,5 @@ public class ESLoggerUsageTests extends ESTestCase {
}
logger.info(message, args);
}
+
}
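
Tying the two logger-usage files together: the test harness feeds its own compiled class through the checker and asserts that exactly the checkFail* methods produce findings. A condensed, self-contained sketch of that driver, using only the ESLoggerUsageChecker.check call visible in the hunks above:

    import java.io.InputStream;
    import java.util.ArrayList;
    import java.util.List;

    import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker;
    import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage;

    class CheckerDriverSketch {
        // scan one named method (and its lambdas) of a compiled class for bad logger calls
        static List<WrongLoggerUsage> scan(Class<?> clazz, String methodName) throws Exception {
            List<WrongLoggerUsage> errors = new ArrayList<>();
            try (InputStream classBytes = clazz.getResourceAsStream(clazz.getSimpleName() + ".class")) {
                ESLoggerUsageChecker.check(errors::add, classBytes,
                    m -> m.equals(methodName) || m.startsWith("lambda$" + methodName));
            }
            return errors; // empty list means the method's logger usage is clean
        }
    }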